Prusa Slicer 2.6.0 — API Documentation

Eigen::internal Namespace Reference

Namespaces

namespace  anonymous_namespace{Macros.h}
 
namespace  std_fallback
 

Classes

struct  abs2_impl
 
struct  abs2_impl_default
 
struct  abs2_impl_default< Scalar, true >
 
struct  abs2_retval
 
struct  abs_knowing_score
 
struct  abs_knowing_score< Scalar, typename scalar_score_coeff_op< Scalar >::Score_is_abs >
 
struct  accessors_level
 
struct  add_assign_op
 
struct  add_const
 
struct  add_const< T & >
 
struct  add_const_on_value_type
 
struct  add_const_on_value_type< T & >
 
struct  add_const_on_value_type< T * >
 
struct  add_const_on_value_type< T *const >
 
struct  add_const_on_value_type< T const *const >
 
struct  add_const_on_value_type_if_arithmetic
 
class  aligned_stack_memory_handler
 
struct  all_unroller
 
struct  all_unroller< Derived, 0 >
 
struct  all_unroller< Derived, Dynamic >
 
struct  always_void
 
class  AmbiVector
 
struct  any_unroller
 
struct  any_unroller< Derived, 0 >
 
struct  any_unroller< Derived, Dynamic >
 
struct  apply_rotation_in_the_plane_selector
 
struct  apply_rotation_in_the_plane_selector< Scalar, OtherScalar, SizeAtCompileTime, MinAlignment, true >
 
struct  arg_default_impl
 
struct  arg_default_impl< Scalar, true >
 
struct  arg_impl
 
struct  arg_retval
 
struct  assign_op
 
struct  assign_op< DstScalar, void >
 
struct  Assignment
 
struct  Assignment< DstXprType, CwiseBinaryOp< internal::scalar_product_op< ScalarBis, Scalar >, const CwiseNullaryOp< internal::scalar_constant_op< ScalarBis >, Plain >, const Product< Lhs, Rhs, DefaultProduct > >, AssignFunc, Dense2Dense >
 
struct  Assignment< DstXprType, Homogeneous< ArgType, Horizontal >, internal::assign_op< Scalar, typename ArgType::Scalar >, Dense2Dense >
 
struct  Assignment< DstXprType, Homogeneous< ArgType, Vertical >, internal::assign_op< Scalar, typename ArgType::Scalar >, Dense2Dense >
 
struct  Assignment< DstXprType, Inverse< ColPivHouseholderQR< MatrixType > >, internal::assign_op< typename DstXprType::Scalar, typename ColPivHouseholderQR< MatrixType >::Scalar >, Dense2Dense >
 
struct  Assignment< DstXprType, Inverse< CompleteOrthogonalDecomposition< MatrixType > >, internal::assign_op< typename DstXprType::Scalar, typename CompleteOrthogonalDecomposition< MatrixType >::Scalar >, Dense2Dense >
 
struct  Assignment< DstXprType, Inverse< FullPivHouseholderQR< MatrixType > >, internal::assign_op< typename DstXprType::Scalar, typename FullPivHouseholderQR< MatrixType >::Scalar >, Dense2Dense >
 
struct  Assignment< DstXprType, Inverse< FullPivLU< MatrixType > >, internal::assign_op< typename DstXprType::Scalar, typename FullPivLU< MatrixType >::Scalar >, Dense2Dense >
 
struct  Assignment< DstXprType, Inverse< PartialPivLU< MatrixType > >, internal::assign_op< typename DstXprType::Scalar, typename PartialPivLU< MatrixType >::Scalar >, Dense2Dense >
 
struct  Assignment< DstXprType, Inverse< XprType >, internal::assign_op< typename DstXprType::Scalar, typename XprType::Scalar >, Dense2Dense >
 
struct  Assignment< DstXprType, Product< Lhs, Rhs, AliasFreeProduct >, internal::add_assign_op< typename DstXprType::Scalar, typename Product< Lhs, Rhs, AliasFreeProduct >::Scalar >, Sparse2Dense >
 
struct  Assignment< DstXprType, Product< Lhs, Rhs, AliasFreeProduct >, internal::assign_op< typename DstXprType::Scalar, typename Product< Lhs, Rhs, AliasFreeProduct >::Scalar >, Sparse2Dense >
 
struct  Assignment< DstXprType, Product< Lhs, Rhs, AliasFreeProduct >, internal::sub_assign_op< typename DstXprType::Scalar, typename Product< Lhs, Rhs, AliasFreeProduct >::Scalar >, Sparse2Dense >
 
struct  Assignment< DstXprType, Product< Lhs, Rhs, DefaultProduct >, internal::add_assign_op< Scalar, typename Product< Lhs, Rhs, DefaultProduct >::Scalar >, Dense2Triangular >
 
struct  Assignment< DstXprType, Product< Lhs, Rhs, DefaultProduct >, internal::assign_op< Scalar, typename Product< Lhs, Rhs, DefaultProduct >::Scalar >, Dense2Triangular >
 
struct  Assignment< DstXprType, Product< Lhs, Rhs, DefaultProduct >, internal::sub_assign_op< Scalar, typename Product< Lhs, Rhs, DefaultProduct >::Scalar >, Dense2Triangular >
 
struct  Assignment< DstXprType, Product< Lhs, Rhs, Options >, internal::add_assign_op< Scalar, Scalar >, Dense2Dense, typename enable_if<(Options==DefaultProduct||Options==AliasFreeProduct)>::type >
 
struct  Assignment< DstXprType, Product< Lhs, Rhs, Options >, internal::assign_op< Scalar, Scalar >, Dense2Dense, typename enable_if<(Options==DefaultProduct||Options==AliasFreeProduct)>::type >
 
struct  Assignment< DstXprType, Product< Lhs, Rhs, Options >, internal::sub_assign_op< Scalar, Scalar >, Dense2Dense, typename enable_if<(Options==DefaultProduct||Options==AliasFreeProduct)>::type >
 
struct  Assignment< DstXprType, Solve< CwiseUnaryOp< internal::scalar_conjugate_op< typename DecType::Scalar >, const Transpose< const DecType > >, RhsType >, internal::assign_op< Scalar, Scalar >, Dense2Dense >
 
struct  Assignment< DstXprType, Solve< DecType, RhsType >, internal::assign_op< Scalar, Scalar >, Dense2Dense >
 
struct  Assignment< DstXprType, Solve< DecType, RhsType >, internal::assign_op< Scalar, Scalar >, Sparse2Sparse >
 
struct  Assignment< DstXprType, Solve< Transpose< const DecType >, RhsType >, internal::assign_op< Scalar, Scalar >, Dense2Dense >
 
struct  Assignment< DstXprType, SolveWithGuess< DecType, RhsType, GuessType >, internal::assign_op< Scalar, Scalar >, Dense2Dense >
 
struct  Assignment< DstXprType, SparseQRMatrixQReturnType< SparseQRType >, internal::assign_op< typename DstXprType::Scalar, typename DstXprType::Scalar >, Sparse2Dense >
 
struct  Assignment< DstXprType, SparseQRMatrixQReturnType< SparseQRType >, internal::assign_op< typename DstXprType::Scalar, typename DstXprType::Scalar >, Sparse2Sparse >
 
struct  Assignment< DstXprType, SparseSymmetricPermutationProduct< MatrixType, Mode >, internal::assign_op< Scalar, typename MatrixType::Scalar >, Sparse2Sparse >
 
struct  Assignment< DstXprType, SrcXprType, Functor, Dense2Dense, Weak >
 
struct  Assignment< DstXprType, SrcXprType, Functor, Dense2Triangular >
 
struct  Assignment< DstXprType, SrcXprType, Functor, Diagonal2Dense >
 
struct  Assignment< DstXprType, SrcXprType, Functor, Diagonal2Sparse >
 
struct  Assignment< DstXprType, SrcXprType, Functor, EigenBase2EigenBase, Weak >
 
struct  Assignment< DstXprType, SrcXprType, Functor, Sparse2Dense >
 
struct  Assignment< DstXprType, SrcXprType, Functor, Sparse2Sparse >
 
struct  Assignment< DstXprType, SrcXprType, Functor, SparseSelfAdjoint2Sparse >
 
struct  Assignment< DstXprType, SrcXprType, Functor, Triangular2Dense >
 
struct  Assignment< DstXprType, SrcXprType, Functor, Triangular2Triangular >
 
struct  assignment_from_xpr_op_product
 
struct  AssignmentKind
 
struct  AssignmentKind< DenseShape, BandShape >
 
struct  AssignmentKind< DenseShape, DenseShape >
 
struct  AssignmentKind< DenseShape, DiagonalShape >
 
struct  AssignmentKind< DenseShape, HomogeneousShape >
 
struct  AssignmentKind< DenseShape, PermutationShape >
 
struct  AssignmentKind< DenseShape, SparseShape >
 
struct  AssignmentKind< DenseShape, SparseTriangularShape >
 
struct  AssignmentKind< DenseShape, TriangularShape >
 
struct  AssignmentKind< SparseSelfAdjointShape, SparseShape >
 
struct  AssignmentKind< SparseShape, DiagonalShape >
 
struct  AssignmentKind< SparseShape, SparseSelfAdjointShape >
 
struct  AssignmentKind< SparseShape, SparseShape >
 
struct  AssignmentKind< SparseShape, SparseTriangularShape >
 
struct  AssignmentKind< TriangularShape, DenseShape >
 
struct  AssignmentKind< TriangularShape, TriangularShape >
 
class  BandMatrix
 Represents a rectangular matrix with banded storage. More...
 
class  BandMatrixBase
 
class  BandMatrixWrapper
 
struct  BandShape
 
struct  binary_evaluator
 
struct  binary_evaluator< CwiseBinaryOp< BinaryOp, Lhs, Rhs >, IndexBased, IndexBased >
 
struct  binary_evaluator< CwiseBinaryOp< BinaryOp, Lhs, Rhs >, IndexBased, IteratorBased >
 
struct  binary_evaluator< CwiseBinaryOp< BinaryOp, Lhs, Rhs >, IteratorBased, IndexBased >
 
struct  binary_evaluator< CwiseBinaryOp< BinaryOp, Lhs, Rhs >, IteratorBased, IteratorBased >
 
struct  binary_evaluator< CwiseBinaryOp< scalar_boolean_and_op, Lhs, Rhs >, IndexBased, IteratorBased >
 
struct  binary_evaluator< CwiseBinaryOp< scalar_boolean_and_op, Lhs, Rhs >, IteratorBased, IndexBased >
 
struct  binary_evaluator< CwiseBinaryOp< scalar_boolean_and_op, Lhs, Rhs >, IteratorBased, IteratorBased >
 
struct  binary_evaluator< CwiseBinaryOp< scalar_product_op< T1, T2 >, Lhs, Rhs >, IndexBased, IteratorBased >
 
struct  binary_evaluator< CwiseBinaryOp< scalar_product_op< T1, T2 >, Lhs, Rhs >, IteratorBased, IndexBased >
 
struct  binary_evaluator< CwiseBinaryOp< scalar_product_op< T1, T2 >, Lhs, Rhs >, IteratorBased, IteratorBased >
 
struct  binary_evaluator< CwiseBinaryOp< scalar_quotient_op< T1, T2 >, Lhs, Rhs >, IteratorBased, IndexBased >
 
struct  binary_op_base
 
struct  binary_result_of_select
 
struct  binary_result_of_select< Func, ArgType0, ArgType1, sizeof(has_std_result_type)>
 
struct  binary_result_of_select< Func, ArgType0, ArgType1, sizeof(has_tr1_result)>
 
struct  binary_sparse_evaluator
 
struct  bind1st_op
 
struct  bind2nd_op
 
class  blas_data_mapper
 
struct  blas_traits
 
struct  blas_traits< const T >
 
struct  blas_traits< CwiseBinaryOp< scalar_product_op< Scalar >, const CwiseNullaryOp< scalar_constant_op< Scalar >, Plain >, NestedXpr > >
 
struct  blas_traits< CwiseBinaryOp< scalar_product_op< Scalar >, const CwiseNullaryOp< scalar_constant_op< Scalar >, Plain1 >, const CwiseNullaryOp< scalar_constant_op< Scalar >, Plain2 > > >
 
struct  blas_traits< CwiseBinaryOp< scalar_product_op< Scalar >, NestedXpr, const CwiseNullaryOp< scalar_constant_op< Scalar >, Plain > > >
 
struct  blas_traits< CwiseUnaryOp< scalar_conjugate_op< Scalar >, NestedXpr > >
 
struct  blas_traits< CwiseUnaryOp< scalar_opposite_op< Scalar >, NestedXpr > >
 
struct  blas_traits< Transpose< NestedXpr > >
 
class  BlasLinearMapper
 
class  BlasVectorMapper
 
struct  block_evaluator
 
struct  block_evaluator< ArgType, BlockRows, BlockCols, InnerPanel, false >
 
struct  block_evaluator< ArgType, BlockRows, BlockCols, InnerPanel, true >
 
class  BlockImpl_dense
 
class  BlockImpl_dense< XprType, BlockRows, BlockCols, InnerPanel, true >
 
struct  CacheSizes
 
struct  cast_impl
 
struct  cast_return_type
 
struct  check_rows_cols_for_overflow
 
struct  check_rows_cols_for_overflow< Dynamic >
 
struct  check_transpose_aliasing_compile_time_selector
 
struct  check_transpose_aliasing_compile_time_selector< DestIsTransposed, CwiseBinaryOp< BinOp, DerivedA, DerivedB > >
 
struct  check_transpose_aliasing_run_time_selector
 
struct  check_transpose_aliasing_run_time_selector< Scalar, DestIsTransposed, CwiseBinaryOp< BinOp, DerivedA, DerivedB > >
 
struct  checkTransposeAliasing_impl
 
struct  checkTransposeAliasing_impl< Derived, OtherDerived, false >
 
struct  cholmod_configure_matrix
 
struct  cholmod_configure_matrix< double >
 
struct  cholmod_configure_matrix< std::complex< double > >
 
struct  coeff_visitor
 
struct  column_dfs_traits
 
struct  complex_schur_reduce_to_hessenberg
 
struct  complex_schur_reduce_to_hessenberg< MatrixType, false >
 
class  CompressedStorage
 
struct  compute_default_alignment
 
struct  compute_default_alignment< T, Dynamic >
 
struct  compute_default_alignment_helper
 
struct  compute_inverse
 
struct  compute_inverse< MatrixType, ResultType, 1 >
 
struct  compute_inverse< MatrixType, ResultType, 2 >
 
struct  compute_inverse< MatrixType, ResultType, 3 >
 
struct  compute_inverse< MatrixType, ResultType, 4 >
 
struct  compute_inverse_and_det_with_check
 
struct  compute_inverse_and_det_with_check< MatrixType, ResultType, 1 >
 
struct  compute_inverse_and_det_with_check< MatrixType, ResultType, 2 >
 
struct  compute_inverse_and_det_with_check< MatrixType, ResultType, 3 >
 
struct  compute_inverse_and_det_with_check< MatrixType, ResultType, 4 >
 
struct  compute_inverse_size4
 
struct  compute_inverse_size4< Architecture::SSE, double, MatrixType, ResultType >
 
struct  compute_inverse_size4< Architecture::SSE, float, MatrixType, ResultType >
 
class  compute_matrix_flags
 
struct  conditional
 
struct  conditional< false, Then, Else >
 
struct  conj_expr_if
 
struct  conj_helper
 
struct  conj_helper< Packet1cd, Packet1cd, false, true >
 
struct  conj_helper< Packet1cd, Packet1cd, true, false >
 
struct  conj_helper< Packet1cd, Packet1cd, true, true >
 
struct  conj_helper< Packet2cd, Packet2cd, false, true >
 
struct  conj_helper< Packet2cd, Packet2cd, true, false >
 
struct  conj_helper< Packet2cd, Packet2cd, true, true >
 
struct  conj_helper< Packet2cf, Packet2cf, false, true >
 
struct  conj_helper< Packet2cf, Packet2cf, true, false >
 
struct  conj_helper< Packet2cf, Packet2cf, true, true >
 
struct  conj_helper< Packet4cf, Packet4cf, false, true >
 
struct  conj_helper< Packet4cf, Packet4cf, true, false >
 
struct  conj_helper< Packet4cf, Packet4cf, true, true >
 
struct  conj_helper< RealScalar, std::complex< RealScalar >, false, Conj >
 
struct  conj_helper< Scalar, Scalar, false, false >
 
struct  conj_helper< std::complex< RealScalar >, RealScalar, Conj, false >
 
struct  conj_helper< std::complex< RealScalar >, std::complex< RealScalar >, false, true >
 
struct  conj_helper< std::complex< RealScalar >, std::complex< RealScalar >, true, false >
 
struct  conj_helper< std::complex< RealScalar >, std::complex< RealScalar >, true, true >
 
struct  conj_if
 
struct  conj_if< false >
 
struct  conj_if< true >
 
struct  conj_impl
 
struct  conj_impl< Scalar, true >
 
struct  conj_retval
 
struct  conservative_resize_like_impl
 
struct  conservative_resize_like_impl< Derived, OtherDerived, true >
 
struct  conservative_sparse_sparse_product_selector
 
struct  conservative_sparse_sparse_product_selector< Lhs, Rhs, ResultType, ColMajor, ColMajor, ColMajor >
 
struct  conservative_sparse_sparse_product_selector< Lhs, Rhs, ResultType, ColMajor, ColMajor, RowMajor >
 
struct  conservative_sparse_sparse_product_selector< Lhs, Rhs, ResultType, ColMajor, RowMajor, ColMajor >
 
struct  conservative_sparse_sparse_product_selector< Lhs, Rhs, ResultType, ColMajor, RowMajor, RowMajor >
 
struct  conservative_sparse_sparse_product_selector< Lhs, Rhs, ResultType, RowMajor, ColMajor, ColMajor >
 
struct  conservative_sparse_sparse_product_selector< Lhs, Rhs, ResultType, RowMajor, ColMajor, RowMajor >
 
struct  conservative_sparse_sparse_product_selector< Lhs, Rhs, ResultType, RowMajor, RowMajor, ColMajor >
 
struct  conservative_sparse_sparse_product_selector< Lhs, Rhs, ResultType, RowMajor, RowMajor, RowMajor >
 
class  const_blas_data_mapper
 
struct  constructor_without_unaligned_array_assert
 
struct  copy_using_evaluator_DefaultTraversal_CompleteUnrolling
 
struct  copy_using_evaluator_DefaultTraversal_CompleteUnrolling< Kernel, Stop, Stop >
 
struct  copy_using_evaluator_DefaultTraversal_InnerUnrolling
 
struct  copy_using_evaluator_DefaultTraversal_InnerUnrolling< Kernel, Stop, Stop >
 
struct  copy_using_evaluator_innervec_CompleteUnrolling
 
struct  copy_using_evaluator_innervec_CompleteUnrolling< Kernel, Stop, Stop >
 
struct  copy_using_evaluator_innervec_InnerUnrolling
 
struct  copy_using_evaluator_innervec_InnerUnrolling< Kernel, Stop, Stop, SrcAlignment, DstAlignment >
 
struct  copy_using_evaluator_LinearTraversal_CompleteUnrolling
 
struct  copy_using_evaluator_LinearTraversal_CompleteUnrolling< Kernel, Stop, Stop >
 
struct  copy_using_evaluator_traits
 
struct  cross3_impl
 
struct  cross3_impl< Architecture::SSE, VectorLhs, VectorRhs, float, true >
 
struct  cwise_promote_storage_order
 
struct  cwise_promote_storage_order< LhsKind, Sparse, LhsOrder, RhsOrder >
 
struct  cwise_promote_storage_order< Sparse, RhsKind, LhsOrder, RhsOrder >
 
struct  cwise_promote_storage_order< Sparse, Sparse, Order, Order >
 
struct  cwise_promote_storage_type
 
struct  cwise_promote_storage_type< A, A, Functor >
 
struct  cwise_promote_storage_type< A, Dense, Functor >
 
struct  cwise_promote_storage_type< Dense, B, Functor >
 
struct  cwise_promote_storage_type< Dense, Dense, Functor >
 
struct  cwise_promote_storage_type< Dense, Sparse, Functor >
 
struct  cwise_promote_storage_type< Sparse, Dense, Functor >
 
struct  decrement_size
 
struct  default_digits10_impl
 
struct  default_digits10_impl< T, false, false >
 
struct  default_digits10_impl< T, false, true >
 
struct  default_packet_traits
 
struct  Dense2Dense
 
struct  Dense2Triangular
 
struct  dense_assignment_loop
 
struct  dense_assignment_loop< Kernel, DefaultTraversal, CompleteUnrolling >
 
struct  dense_assignment_loop< Kernel, DefaultTraversal, InnerUnrolling >
 
struct  dense_assignment_loop< Kernel, DefaultTraversal, NoUnrolling >
 
struct  dense_assignment_loop< Kernel, InnerVectorizedTraversal, CompleteUnrolling >
 
struct  dense_assignment_loop< Kernel, InnerVectorizedTraversal, InnerUnrolling >
 
struct  dense_assignment_loop< Kernel, InnerVectorizedTraversal, NoUnrolling >
 
struct  dense_assignment_loop< Kernel, LinearTraversal, CompleteUnrolling >
 
struct  dense_assignment_loop< Kernel, LinearTraversal, NoUnrolling >
 
struct  dense_assignment_loop< Kernel, LinearVectorizedTraversal, CompleteUnrolling >
 
struct  dense_assignment_loop< Kernel, LinearVectorizedTraversal, NoUnrolling >
 
struct  dense_assignment_loop< Kernel, SliceVectorizedTraversal, NoUnrolling >
 
class  dense_product_base
 
class  dense_product_base< Lhs, Rhs, Option, InnerProduct >
 
struct  dense_xpr_base
 
struct  dense_xpr_base< Derived, ArrayXpr >
 
struct  dense_xpr_base< Derived, MatrixXpr >
 
struct  determinant_impl
 
struct  determinant_impl< Derived, 1 >
 
struct  determinant_impl< Derived, 2 >
 
struct  determinant_impl< Derived, 3 >
 
struct  determinant_impl< Derived, 4 >
 
struct  Diagonal2Dense
 
struct  Diagonal2Sparse
 
struct  diagonal_product_evaluator_base
 
struct  direct_selfadjoint_eigenvalues
 
struct  direct_selfadjoint_eigenvalues< SolverType, 2, false >
 
struct  direct_selfadjoint_eigenvalues< SolverType, 3, false >
 
struct  div_assign_op
 
struct  dot_nocheck
 
struct  dot_nocheck< T, U, true >
 
struct  DoublePacket
 
struct  EigenBase2EigenBase
 
struct  eigenvalues_selector
 
struct  eigenvalues_selector< Derived, false >
 
struct  enable_if
 
struct  enable_if< true, T >
 
struct  enable_if_ref
 
struct  enable_if_ref< Ref< T >, Derived >
 
struct  EnableIf
 
struct  etor_product_coeff_impl
 
struct  etor_product_packet_impl
 
struct  etor_product_packet_impl< ColMajor, 0, Lhs, Rhs, Packet, LoadMode >
 
struct  etor_product_packet_impl< ColMajor, 1, Lhs, Rhs, Packet, LoadMode >
 
struct  etor_product_packet_impl< ColMajor, Dynamic, Lhs, Rhs, Packet, LoadMode >
 
struct  etor_product_packet_impl< ColMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode >
 
struct  etor_product_packet_impl< RowMajor, 0, Lhs, Rhs, Packet, LoadMode >
 
struct  etor_product_packet_impl< RowMajor, 1, Lhs, Rhs, Packet, LoadMode >
 
struct  etor_product_packet_impl< RowMajor, Dynamic, Lhs, Rhs, Packet, LoadMode >
 
struct  etor_product_packet_impl< RowMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode >
 
struct  eval
 
struct  eval< Array< _Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols >, Dense >
 
struct  eval< Matrix< _Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols >, Dense >
 
struct  eval< T, Dense >
 
struct  eval< T, DiagonalShape >
 
struct  eval< T, Sparse >
 
class  EvalToTemp
 
struct  evaluator
 
struct  evaluator< Array< Scalar, Rows, Cols, Options, MaxRows, MaxCols > >
 
struct  evaluator< Block< ArgType, BlockRows, BlockCols, InnerPanel > >
 
struct  evaluator< const T >
 
struct  evaluator< CwiseBinaryOp< BinaryOp, Lhs, Rhs > >
 
struct  evaluator< CwiseBinaryOp< internal::scalar_product_op< Scalar1, Scalar2 >, const CwiseNullaryOp< internal::scalar_constant_op< Scalar1 >, Plain1 >, const Product< Lhs, Rhs, DefaultProduct > > >
 
struct  evaluator< CwiseNullaryOp< NullaryOp, PlainObjectType > >
 
struct  evaluator< CwiseTernaryOp< TernaryOp, Arg1, Arg2, Arg3 > >
 
struct  evaluator< Diagonal< ArgType, DiagIndex > >
 
struct  evaluator< Diagonal< const Product< Lhs, Rhs, DefaultProduct >, DiagIndex > >
 
struct  evaluator< DynamicSparseMatrix< _Scalar, _Options, _StorageIndex > >
 
struct  evaluator< EvalToTemp< ArgType > >
 
struct  evaluator< Map< const SparseMatrix< MatScalar, MatOptions, MatIndex >, Options, StrideType > >
 
struct  evaluator< Map< PlainObjectType, MapOptions, StrideType > >
 
struct  evaluator< Map< SparseMatrix< MatScalar, MatOptions, MatIndex >, Options, StrideType > >
 
struct  evaluator< MappedSparseMatrix< _Scalar, _Options, _StorageIndex > >
 
struct  evaluator< Matrix< Scalar, Rows, Cols, Options, MaxRows, MaxCols > >
 
struct  evaluator< PartialReduxExpr< ArgType, MemberOp, Direction > >
 
struct  evaluator< PlainObjectBase< Derived > >
 
struct  evaluator< Product< Lhs, Rhs, Options > >
 
struct  evaluator< Ref< const SparseMatrix< MatScalar, MatOptions, MatIndex >, Options, StrideType > >
 
struct  evaluator< Ref< const SparseVector< MatScalar, MatOptions, MatIndex >, Options, StrideType > >
 
struct  evaluator< Ref< PlainObjectType, RefOptions, StrideType > >
 
struct  evaluator< Ref< SparseMatrix< MatScalar, MatOptions, MatIndex >, Options, StrideType > >
 
struct  evaluator< Ref< SparseVector< MatScalar, MatOptions, MatIndex >, Options, StrideType > >
 
struct  evaluator< ReturnByValue< Derived > >
 
struct  evaluator< Select< ConditionMatrixType, ThenMatrixType, ElseMatrixType > >
 
struct  evaluator< Solve< Decomposition, RhsType > >
 
struct  evaluator< SolveWithGuess< Decomposition, RhsType, GuessType > >
 
struct  evaluator< SparseCompressedBase< Derived > >
 
struct  evaluator< SparseMatrix< _Scalar, _Options, _StorageIndex > >
 
struct  evaluator< SparseVector< _Scalar, _Options, _Index > >
 
struct  evaluator_assume_aliasing
 
struct  evaluator_assume_aliasing< CwiseBinaryOp< internal::scalar_difference_op< typename OtherXpr::Scalar, typename Product< Lhs, Rhs, DefaultProduct >::Scalar >, const OtherXpr, const Product< Lhs, Rhs, DefaultProduct > >, DenseShape >
 
struct  evaluator_assume_aliasing< CwiseBinaryOp< internal::scalar_product_op< Scalar1, Scalar2 >, const CwiseNullaryOp< internal::scalar_constant_op< Scalar1 >, Plain1 >, const Product< Lhs, Rhs, DefaultProduct > > >
 
struct  evaluator_assume_aliasing< CwiseBinaryOp< internal::scalar_sum_op< typename OtherXpr::Scalar, typename Product< Lhs, Rhs, DefaultProduct >::Scalar >, const OtherXpr, const Product< Lhs, Rhs, DefaultProduct > >, DenseShape >
 
struct  evaluator_assume_aliasing< Product< Lhs, Rhs, DefaultProduct > >
 
struct  evaluator_base
 
struct  evaluator_traits
 
struct  evaluator_traits< BandMatrix< _Scalar, _Rows, _Cols, _Supers, _Subs, _Options > >
 
struct  evaluator_traits< BandMatrixWrapper< _CoefficientsType, _Rows, _Cols, _Supers, _Subs, _Options > >
 
struct  evaluator_traits< Homogeneous< ArgType, Direction > >
 
struct  evaluator_traits< HouseholderSequence< VectorsType, CoeffsType, Side > >
 
struct  evaluator_traits< SelfAdjointView< MatrixType, Mode > >
 
struct  evaluator_traits< SparseQRMatrixQReturnType< SparseQRType > >
 
struct  evaluator_traits< SparseSelfAdjointView< MatrixType, Mode > >
 
struct  evaluator_traits< TriangularView< MatrixType, Mode > >
 
struct  evaluator_traits_base
 
struct  evaluator_wrapper_base
 
struct  extract_data_selector
 
struct  extract_data_selector< T, false >
 
struct  false_type
 
struct  find_best_packet
 
struct  find_best_packet_helper
 
struct  find_best_packet_helper< Size, PacketType, false >
 
struct  find_best_packet_helper< Size, PacketType, true >
 
struct  first_aligned_impl
 
struct  first_aligned_impl< Alignment, Derived, false >
 
struct  FullPivHouseholderQRMatrixQReturnType
 Expression type for the return value of FullPivHouseholderQR::matrixQ(). More...
 
struct  functor_has_linear_access
 
struct  functor_traits
 
struct  functor_traits< add_assign_op< DstScalar, SrcScalar > >
 
struct  functor_traits< assign_op< DstScalar, SrcScalar > >
 
struct  functor_traits< bind1st_op< BinaryOp > >
 
struct  functor_traits< bind2nd_op< BinaryOp > >
 
struct  functor_traits< div_assign_op< DstScalar, SrcScalar > >
 
struct  functor_traits< linspaced_op< Scalar, PacketType > >
 
struct  functor_traits< max_coeff_visitor< Scalar > >
 
struct  functor_traits< min_coeff_visitor< Scalar > >
 
struct  functor_traits< mul_assign_op< DstScalar, SrcScalar > >
 
struct  functor_traits< scalar_abs2_op< Scalar > >
 
struct  functor_traits< scalar_abs_op< Scalar > >
 
struct  functor_traits< scalar_acos_op< Scalar > >
 
struct  functor_traits< scalar_arg_op< Scalar > >
 
struct  functor_traits< scalar_asin_op< Scalar > >
 
struct  functor_traits< scalar_atan_op< Scalar > >
 
struct  functor_traits< scalar_boolean_and_op >
 
struct  functor_traits< scalar_boolean_not_op< Scalar > >
 
struct  functor_traits< scalar_boolean_or_op >
 
struct  functor_traits< scalar_boolean_xor_op >
 
struct  functor_traits< scalar_cast_op< Eigen::half, float > >
 
struct  functor_traits< scalar_cast_op< float, Eigen::half > >
 
struct  functor_traits< scalar_cast_op< int, Eigen::half > >
 
struct  functor_traits< scalar_cast_op< Scalar, NewType > >
 
struct  functor_traits< scalar_ceil_op< Scalar > >
 
struct  functor_traits< scalar_cmp_op< LhsScalar, RhsScalar, cmp > >
 
struct  functor_traits< scalar_conj_product_op< LhsScalar, RhsScalar > >
 
struct  functor_traits< scalar_conjugate_op< Scalar > >
 
struct  functor_traits< scalar_constant_op< Scalar > >
 
struct  functor_traits< scalar_cos_op< Scalar > >
 
struct  functor_traits< scalar_cosh_op< Scalar > >
 
struct  functor_traits< scalar_cube_op< Scalar > >
 
struct  functor_traits< scalar_difference_op< LhsScalar, RhsScalar > >
 
struct  functor_traits< scalar_exp_op< Scalar > >
 
struct  functor_traits< scalar_floor_op< Scalar > >
 
struct  functor_traits< scalar_hypot_op< Scalar, Scalar > >
 
struct  functor_traits< scalar_identity_op< Scalar > >
 
struct  functor_traits< scalar_imag_op< Scalar > >
 
struct  functor_traits< scalar_imag_ref_op< Scalar > >
 
struct  functor_traits< scalar_inverse_op< Scalar > >
 
struct  functor_traits< scalar_isfinite_op< Scalar > >
 
struct  functor_traits< scalar_isinf_op< Scalar > >
 
struct  functor_traits< scalar_isnan_op< Scalar > >
 
struct  functor_traits< scalar_log10_op< Scalar > >
 
struct  functor_traits< scalar_log1p_op< Scalar > >
 
struct  functor_traits< scalar_log_op< Scalar > >
 
struct  functor_traits< scalar_max_op< LhsScalar, RhsScalar > >
 
struct  functor_traits< scalar_min_op< LhsScalar, RhsScalar > >
 
struct  functor_traits< scalar_opposite_op< Scalar > >
 
struct  functor_traits< scalar_pow_op< Scalar, Exponent > >
 
struct  functor_traits< scalar_product_op< LhsScalar, RhsScalar > >
 
struct  functor_traits< scalar_quotient_op< LhsScalar, RhsScalar > >
 
struct  functor_traits< scalar_random_op< Scalar > >
 
struct  functor_traits< scalar_real_op< Scalar > >
 
struct  functor_traits< scalar_real_ref_op< Scalar > >
 
struct  functor_traits< scalar_round_op< Scalar > >
 
struct  functor_traits< scalar_rsqrt_op< Scalar > >
 
struct  functor_traits< scalar_score_coeff_op< Scalar > >
 
struct  functor_traits< scalar_sign_op< Scalar > >
 
struct  functor_traits< scalar_sin_op< Scalar > >
 
struct  functor_traits< scalar_sinh_op< Scalar > >
 
struct  functor_traits< scalar_sqrt_op< Scalar > >
 
struct  functor_traits< scalar_square_op< Scalar > >
 
struct  functor_traits< scalar_sum_op< LhsScalar, RhsScalar > >
 
struct  functor_traits< scalar_tan_op< Scalar > >
 
struct  functor_traits< scalar_tanh_op< Scalar > >
 
struct  functor_traits< std::binary_negate< T > >
 
struct  functor_traits< std::binder1st< T > >
 
struct  functor_traits< std::binder2nd< T > >
 
struct  functor_traits< std::divides< T > >
 
struct  functor_traits< std::equal_to< T > >
 
struct  functor_traits< std::greater< T > >
 
struct  functor_traits< std::greater_equal< T > >
 
struct  functor_traits< std::less< T > >
 
struct  functor_traits< std::less_equal< T > >
 
struct  functor_traits< std::logical_and< T > >
 
struct  functor_traits< std::logical_not< T > >
 
struct  functor_traits< std::logical_or< T > >
 
struct  functor_traits< std::minus< T > >
 
struct  functor_traits< std::multiplies< T > >
 
struct  functor_traits< std::negate< T > >
 
struct  functor_traits< std::not_equal_to< T > >
 
struct  functor_traits< std::plus< T > >
 
struct  functor_traits< std::unary_negate< T > >
 
struct  functor_traits< sub_assign_op< DstScalar, SrcScalar > >
 
struct  functor_traits< swap_assign_op< Scalar > >
 
struct  gebp_kernel
 
struct  gebp_madd_selector
 
struct  gebp_madd_selector< CJ, T, T, T, T >
 
class  gebp_traits
 
class  gebp_traits< RealScalar, std::complex< RealScalar >, false, _ConjRhs >
 
class  gebp_traits< std::complex< RealScalar >, RealScalar, _ConjLhs, false >
 
class  gebp_traits< std::complex< RealScalar >, std::complex< RealScalar >, _ConjLhs, _ConjRhs >
 
class  gemm_blocking_space
 
class  gemm_blocking_space< StorageOrder, _LhsScalar, _RhsScalar, MaxRows, MaxCols, MaxDepth, KcFactor, false >
 
class  gemm_blocking_space< StorageOrder, _LhsScalar, _RhsScalar, MaxRows, MaxCols, MaxDepth, KcFactor, true >
 
struct  gemm_functor
 
struct  gemm_pack_lhs
 
struct  gemm_pack_lhs< Scalar, Index, DataMapper, Pack1, Pack2, ColMajor, Conjugate, PanelMode >
 
struct  gemm_pack_lhs< Scalar, Index, DataMapper, Pack1, Pack2, RowMajor, Conjugate, PanelMode >
 
struct  gemm_pack_rhs
 
struct  gemm_pack_rhs< Scalar, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode >
 
struct  gemm_pack_rhs< Scalar, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode >
 
struct  GemmParallelInfo
 
struct  gemv_dense_selector
 
struct  gemv_dense_selector< OnTheLeft, StorageOrder, BlasCompatible >
 
struct  gemv_dense_selector< OnTheRight, ColMajor, false >
 
struct  gemv_dense_selector< OnTheRight, ColMajor, true >
 
struct  gemv_dense_selector< OnTheRight, RowMajor, false >
 
struct  gemv_dense_selector< OnTheRight, RowMajor, true >
 
struct  gemv_static_vector_if
 
struct  gemv_static_vector_if< Scalar, Size, Dynamic, true >
 
struct  gemv_static_vector_if< Scalar, Size, MaxSize, false >
 
struct  gemv_static_vector_if< Scalar, Size, MaxSize, true >
 
struct  general_matrix_matrix_product
 
struct  general_matrix_matrix_product< Index, LhsScalar, LhsStorageOrder, ConjugateLhs, RhsScalar, RhsStorageOrder, ConjugateRhs, ColMajor >
 
struct  general_matrix_matrix_product< Index, LhsScalar, LhsStorageOrder, ConjugateLhs, RhsScalar, RhsStorageOrder, ConjugateRhs, RowMajor >
 
struct  general_matrix_matrix_rankupdate
 
struct  general_matrix_matrix_triangular_product
 
struct  general_matrix_matrix_triangular_product< Index, LhsScalar, LhsStorageOrder, ConjugateLhs, RhsScalar, RhsStorageOrder, ConjugateRhs, ColMajor, UpLo, Version >
 
struct  general_matrix_matrix_triangular_product< Index, LhsScalar, LhsStorageOrder, ConjugateLhs, RhsScalar, RhsStorageOrder, ConjugateRhs, RowMajor, UpLo, Version >
 
struct  general_matrix_vector_product
 
struct  general_matrix_vector_product< Index, LhsScalar, LhsMapper, ColMajor, ConjugateLhs, RhsScalar, RhsMapper, ConjugateRhs, Version >
 
struct  general_matrix_vector_product< Index, LhsScalar, LhsMapper, RowMajor, ConjugateLhs, RhsScalar, RhsMapper, ConjugateRhs, Version >
 
struct  general_matrix_vector_product_gemv
 
class  generic_dense_assignment_kernel
 
class  generic_dense_assignment_kernel< DstEvaluatorTypeT, SrcEvaluatorTypeT, swap_assign_op< typename DstEvaluatorTypeT::Scalar >, Specialized >
 
class  generic_matrix_wrapper
 
class  generic_matrix_wrapper< MatrixType, false >
 
class  generic_matrix_wrapper< MatrixType, true >
 
struct  generic_product_impl
 
struct  generic_product_impl< Homogeneous< LhsArg, Horizontal >, Rhs, HomogeneousShape, DenseShape, ProductTag >
 
struct  generic_product_impl< Inverse< Lhs >, Rhs, PermutationShape, MatrixShape, ProductTag >
 
struct  generic_product_impl< Lhs, Homogeneous< RhsArg, Vertical >, DenseShape, HomogeneousShape, ProductTag >
 
struct  generic_product_impl< Lhs, Homogeneous< RhsArg, Vertical >, TriangularShape, HomogeneousShape, ProductTag >
 
struct  generic_product_impl< Lhs, Inverse< Rhs >, MatrixShape, PermutationShape, ProductTag >
 
struct  generic_product_impl< Lhs, Rhs, DenseShape, DenseShape, CoeffBasedProductMode >
 
struct  generic_product_impl< Lhs, Rhs, DenseShape, DenseShape, GemmProduct >
 
struct  generic_product_impl< Lhs, Rhs, DenseShape, DenseShape, GemvProduct >
 
struct  generic_product_impl< Lhs, Rhs, DenseShape, DenseShape, InnerProduct >
 
struct  generic_product_impl< Lhs, Rhs, DenseShape, DenseShape, LazyCoeffBasedProductMode >
 
struct  generic_product_impl< Lhs, Rhs, DenseShape, DenseShape, OuterProduct >
 
struct  generic_product_impl< Lhs, Rhs, DenseShape, SelfAdjointShape, ProductTag >
 
struct  generic_product_impl< Lhs, Rhs, DenseShape, SparseShape, ProductType >
 
struct  generic_product_impl< Lhs, Rhs, DenseShape, SparseTriangularShape, ProductType >
 
struct  generic_product_impl< Lhs, Rhs, DenseShape, TriangularShape, ProductTag >
 
struct  generic_product_impl< Lhs, Rhs, MatrixShape, PermutationShape, ProductTag >
 
struct  generic_product_impl< Lhs, Rhs, MatrixShape, TranspositionsShape, ProductTag >
 
struct  generic_product_impl< Lhs, Rhs, PermutationShape, MatrixShape, ProductTag >
 
struct  generic_product_impl< Lhs, Rhs, SelfAdjointShape, DenseShape, ProductTag >
 
struct  generic_product_impl< Lhs, Rhs, SparseShape, DenseShape, ProductType >
 
struct  generic_product_impl< Lhs, Rhs, SparseShape, SparseShape, ProductType >
 
struct  generic_product_impl< Lhs, Rhs, SparseShape, SparseTriangularShape, ProductType >
 
struct  generic_product_impl< Lhs, Rhs, SparseTriangularShape, DenseShape, ProductType >
 
struct  generic_product_impl< Lhs, Rhs, SparseTriangularShape, SparseShape, ProductType >
 
struct  generic_product_impl< Lhs, Rhs, TranspositionsShape, MatrixShape, ProductTag >
 
struct  generic_product_impl< Lhs, Rhs, TriangularShape, DenseShape, ProductTag >
 
struct  generic_product_impl< Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType >
 
struct  generic_product_impl< Lhs, Transpose< Rhs >, MatrixShape, TranspositionsShape, ProductTag >
 
struct  generic_product_impl< LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType >
 
struct  generic_product_impl< Transform< Scalar, Dim, Mode, Options >, Homogeneous< RhsArg, Vertical >, DenseShape, HomogeneousShape, ProductTag >
 
struct  generic_product_impl< Transpose< Lhs >, Rhs, TranspositionsShape, MatrixShape, ProductTag >
 
struct  generic_product_impl_base
 
struct  generic_xpr_base
 
struct  generic_xpr_base< Derived, MatrixXpr, SolverStorage >
 
struct  generic_xpr_base< Derived, MatrixXpr, Sparse >
 
struct  generic_xpr_base< Derived, XprKind, Dense >
 
struct  get_factor
 
struct  get_factor< Scalar, typename NumTraits< Scalar >::Real >
 
struct  global_math_functions_filtering_base
 
struct  global_math_functions_filtering_base< T, typename always_void< typename T::Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl >::type >
 
struct  glue_shapes
 
struct  glue_shapes< DenseShape, TriangularShape >
 
struct  glue_shapes< SparseShape, SelfAdjointShape >
 
struct  glue_shapes< SparseShape, TriangularShape >
 
struct  has_binary_operator
 
struct  has_binary_operator< linspaced_op< Scalar, PacketType >, IndexType >
 
struct  has_binary_operator< scalar_constant_op< Scalar >, IndexType >
 
struct  has_binary_operator< scalar_identity_op< Scalar >, IndexType >
 
struct  has_binary_operator< scalar_random_op< Scalar >, IndexType >
 
struct  has_direct_access
 
struct  has_none
 
struct  has_nullary_operator
 
struct  has_nullary_operator< linspaced_op< Scalar, PacketType >, IndexType >
 
struct  has_nullary_operator< scalar_constant_op< Scalar >, IndexType >
 
struct  has_nullary_operator< scalar_identity_op< Scalar >, IndexType >
 
struct  has_nullary_operator< scalar_random_op< Scalar >, IndexType >
 
struct  has_ReturnType
 
struct  has_std_result_type
 
struct  has_tr1_result
 
struct  has_unary_operator
 
struct  has_unary_operator< linspaced_op< Scalar, PacketType >, IndexType >
 
struct  has_unary_operator< scalar_constant_op< Scalar >, IndexType >
 
struct  has_unary_operator< scalar_identity_op< Scalar >, IndexType >
 
struct  has_unary_operator< scalar_random_op< Scalar >, IndexType >
 
struct  HessenbergDecompositionMatrixHReturnType
 Expression type for return value of HessenbergDecomposition::matrixH() More...
 
struct  homogeneous_left_product_impl
 
struct  homogeneous_left_product_impl< Homogeneous< MatrixType, Vertical >, Lhs >
 
struct  homogeneous_left_product_refactoring_helper
 
struct  homogeneous_right_product_impl
 
struct  homogeneous_right_product_impl< Homogeneous< MatrixType, Horizontal >, Rhs >
 
struct  homogeneous_right_product_refactoring_helper
 
struct  householder_qr_inplace_blocked
 
struct  HouseholderSequenceShape
 
struct  hseq_side_dependent_impl
 
struct  hseq_side_dependent_impl< VectorsType, CoeffsType, OnTheRight >
 
struct  hypot_impl
 
struct  hypot_retval
 
struct  imag_default_impl
 
struct  imag_default_impl< Scalar, true >
 
struct  imag_impl
 
struct  imag_ref_default_impl
 
struct  imag_ref_default_impl< Scalar, false >
 
struct  imag_ref_impl
 
struct  imag_ref_retval
 
struct  imag_retval
 
struct  image_retval
 
struct  image_retval< FullPivLU< _MatrixType > >
 
class  image_retval_base
 
struct  IndexBased
 
class  inner_iterator_selector
 
class  inner_iterator_selector< XprType, IndexBased >
 
class  inner_iterator_selector< XprType, IteratorBased >
 
struct  inner_stride_at_compile_time
 
struct  inner_stride_at_compile_time< Derived, false >
 
struct  inplace_transpose_selector
 
struct  inplace_transpose_selector< MatrixType, false, MatchPacketSize >
 
struct  inplace_transpose_selector< MatrixType, true, false >
 
struct  inplace_transpose_selector< MatrixType, true, true >
 
struct  inverse_impl
 
struct  is_arithmetic
 
struct  is_arithmetic< __m128 >
 
struct  is_arithmetic< __m128d >
 
struct  is_arithmetic< __m128i >
 
struct  is_arithmetic< __m256 >
 
struct  is_arithmetic< __m256d >
 
struct  is_arithmetic< __m256i >
 
struct  is_arithmetic< __m512 >
 
struct  is_arithmetic< __m512d >
 
struct  is_arithmetic< __m512i >
 
struct  is_arithmetic< bool >
 
struct  is_arithmetic< char >
 
struct  is_arithmetic< double >
 
struct  is_arithmetic< float >
 
struct  is_arithmetic< half >
 
struct  is_arithmetic< long double >
 
struct  is_arithmetic< signed char >
 
struct  is_arithmetic< signed int >
 
struct  is_arithmetic< signed long >
 
struct  is_arithmetic< signed short >
 
struct  is_arithmetic< unsigned char >
 
struct  is_arithmetic< unsigned int >
 
struct  is_arithmetic< unsigned long >
 
struct  is_arithmetic< unsigned short >
 
struct  is_const
 
struct  is_const< T const >
 
struct  is_convertible
 
struct  is_convertible_impl
 
struct  is_diagonal
 
struct  is_diagonal< DiagonalBase< T > >
 
struct  is_diagonal< DiagonalMatrix< T, S > >
 
struct  is_diagonal< DiagonalWrapper< T > >
 
struct  is_integral
 
struct  is_integral< bool >
 
struct  is_integral< char >
 
struct  is_integral< signed char >
 
struct  is_integral< signed int >
 
struct  is_integral< signed long >
 
struct  is_integral< signed short >
 
struct  is_integral< unsigned char >
 
struct  is_integral< unsigned int >
 
struct  is_integral< unsigned long >
 
struct  is_integral< unsigned short >
 
struct  is_lvalue
 
struct  is_ref_compatible
 
struct  is_ref_compatible_impl
 
struct  is_same
 
struct  is_same< T, T >
 
struct  isApprox_selector
 
struct  isApprox_selector< Derived, OtherDerived, true >
 
struct  isMuchSmallerThan_object_selector
 
struct  isMuchSmallerThan_object_selector< Derived, OtherDerived, true >
 
struct  isMuchSmallerThan_scalar_selector
 
struct  isMuchSmallerThan_scalar_selector< Derived, true >
 
struct  IteratorBased
 
struct  kernel_retval
 
struct  kernel_retval< FullPivLU< _MatrixType > >
 
class  kernel_retval_base
 
struct  lapacke_llt
 
struct  ldlt_inplace
 
struct  ldlt_inplace< Lower >
 
struct  ldlt_inplace< Upper >
 
struct  LDLT_Traits
 
struct  LDLT_Traits< MatrixType, Lower >
 
struct  LDLT_Traits< MatrixType, Upper >
 
class  level3_blocking
 
struct  linspaced_op
 
struct  linspaced_op_impl
 
struct  linspaced_op_impl< Scalar, Packet, false >
 
struct  linspaced_op_impl< Scalar, Packet, true >
 
struct  llt_inplace
 
struct  llt_inplace< Scalar, Lower >
 
struct  llt_inplace< Scalar, Upper >
 
struct  LLT_Traits
 
struct  LLT_Traits< MatrixType, Lower >
 
struct  LLT_Traits< MatrixType, Upper >
 
struct  log1p_impl
 
struct  log1p_retval
 
struct  lpNorm_selector
 
struct  lpNorm_selector< Derived, 1 >
 
struct  lpNorm_selector< Derived, 2 >
 
struct  lpNorm_selector< Derived, Infinity >
 
struct  LU_GlobalLU_t
 
struct  LU_kernel_bmod
 
struct  LU_kernel_bmod< 1 >
 
class  make_proper_matrix_type
 
struct  make_unsigned
 
struct  make_unsigned< char >
 
struct  make_unsigned< signed char >
 
struct  make_unsigned< signed int >
 
struct  make_unsigned< signed long >
 
struct  make_unsigned< signed short >
 
struct  make_unsigned< unsigned char >
 
struct  make_unsigned< unsigned int >
 
struct  make_unsigned< unsigned long >
 
struct  make_unsigned< unsigned short >
 
struct  mapbase_evaluator
 
class  MappedSuperNodalMatrix
 a class to manipulate the L supernodal factor from the SparseLU factorization More...
 
struct  matrix_swap_impl
 
struct  matrix_swap_impl< MatrixTypeA, MatrixTypeB, true >
 
struct  matrix_type_times_scalar_type
 
struct  max_coeff_visitor
 
struct  member_lpnorm
 
struct  member_redux
 
struct  meta_floor_log2
 
struct  meta_floor_log2< n, lower, upper, meta_floor_log2_bogus >
 
struct  meta_floor_log2< n, lower, upper, meta_floor_log2_move_down >
 
struct  meta_floor_log2< n, lower, upper, meta_floor_log2_move_up >
 
struct  meta_floor_log2< n, lower, upper, meta_floor_log2_terminate >
 
struct  meta_floor_log2_selector
 
struct  meta_least_common_multiple
 
struct  meta_least_common_multiple< A, B, K, true >
 
struct  meta_no
 
class  meta_sqrt
 
class  meta_sqrt< Y, InfX, SupX, true >
 
struct  meta_yes
 
struct  min_coeff_visitor
 
struct  mul_assign_op
 
struct  nested_eval
 
struct  nested_eval< ReturnByValue< Derived >, n, PlainObject >
 
class  no_assignment_operator
 
class  noncopyable
 
struct  norm1_default_impl
 
struct  norm1_default_impl< Scalar, false >
 
struct  norm1_impl
 
struct  norm1_retval
 
struct  nullary_wrapper
 
struct  nullary_wrapper< Scalar, NullaryOp, false, false, false >
 
struct  nullary_wrapper< Scalar, NullaryOp, false, false, true >
 
struct  nullary_wrapper< Scalar, NullaryOp, false, true, false >
 
struct  nullary_wrapper< Scalar, NullaryOp, true, false, false >
 
struct  outer_stride_at_compile_time
 
struct  outer_stride_at_compile_time< Derived, false >
 
union  Packet
 
struct  Packet1cd
 
struct  Packet2cd
 
struct  Packet2cf
 
union  Packet2cf.__unnamed74__
 
struct  Packet4cf
 
struct  Packet4f
 
struct  packet_traits
 
struct  packet_traits< const T >
 
struct  packet_traits< double >
 
struct  packet_traits< float >
 
struct  packet_traits< int >
 
struct  packet_traits< int32_t >
 
struct  packet_traits< std::complex< double > >
 
struct  packet_traits< std::complex< float > >
 
struct  PacketBlock
 
struct  palign_impl
 
struct  palign_impl< Offset, Packet16f >
 
struct  palign_impl< Offset, Packet1cd >
 
struct  palign_impl< Offset, Packet2cd >
 
struct  palign_impl< Offset, Packet2cf >
 
struct  palign_impl< Offset, Packet2d >
 
struct  palign_impl< Offset, Packet4cf >
 
struct  palign_impl< Offset, Packet4d >
 
struct  palign_impl< Offset, Packet4f >
 
struct  palign_impl< Offset, Packet4i >
 
struct  palign_impl< Offset, Packet8d >
 
struct  palign_impl< Offset, Packet8f >
 
struct  panel_dfs_traits
 
struct  pardiso_run_selector
 
struct  pardiso_run_selector< long long int >
 
struct  pardiso_traits
 
struct  pardiso_traits< PardisoLDLT< _MatrixType, Options > >
 
struct  pardiso_traits< PardisoLLT< _MatrixType, Options > >
 
struct  pardiso_traits< PardisoLU< _MatrixType > >
 
struct  partial_lu_impl
 
struct  pastix_traits
 
struct  pastix_traits< PastixLDLT< _MatrixType, Options > >
 
struct  pastix_traits< PastixLLT< _MatrixType, Options > >
 
struct  pastix_traits< PastixLU< _MatrixType > >
 
struct  perfvalues
 
struct  permutation_matrix_product
 
struct  permutation_matrix_product< ExpressionType, Side, Transposed, DenseShape >
 
struct  permutation_matrix_product< ExpressionType, Side, Transposed, HomogeneousShape >
 
struct  permutation_matrix_product< ExpressionType, Side, Transposed, SparseShape >
 
struct  plain_array
 
struct  plain_array< T, 0, MatrixOrArrayOptions, Alignment >
 
struct  plain_array< T, Size, MatrixOrArrayOptions, 16 >
 
struct  plain_array< T, Size, MatrixOrArrayOptions, 32 >
 
struct  plain_array< T, Size, MatrixOrArrayOptions, 64 >
 
struct  plain_array< T, Size, MatrixOrArrayOptions, 8 >
 
struct  plain_col_type
 
struct  plain_constant_type
 
struct  plain_diag_type
 
struct  plain_matrix_type
 
struct  plain_matrix_type< T, Dense >
 
struct  plain_matrix_type< T, DiagonalShape >
 
struct  plain_matrix_type< T, Sparse >
 
struct  plain_matrix_type_column_major
 
struct  plain_matrix_type_dense
 
struct  plain_matrix_type_dense< T, ArrayXpr, Flags >
 
struct  plain_matrix_type_dense< T, MatrixXpr, Flags >
 
struct  plain_matrix_type_row_major
 
struct  plain_object_eval
 
struct  plain_object_eval< T, Dense >
 
struct  plain_object_eval< T, Sparse >
 
struct  plain_row_type
 
struct  pow_impl
 
struct  pow_impl< ScalarX, ScalarY, true >
 
struct  product_evaluator
 
struct  product_evaluator< Product< Lhs, Rhs, AliasFreeProduct >, ProductTag, PermutationShape, SparseShape >
 
struct  product_evaluator< Product< Lhs, Rhs, AliasFreeProduct >, ProductTag, SparseShape, PermutationShape >
 
struct  product_evaluator< Product< Lhs, Rhs, DefaultProduct >, LazyCoeffBasedProductMode, DenseShape, DenseShape >
 
struct  product_evaluator< Product< Lhs, Rhs, DefaultProduct >, OuterProduct, DenseShape, SparseShape >
 
struct  product_evaluator< Product< Lhs, Rhs, DefaultProduct >, OuterProduct, SparseShape, DenseShape >
 
struct  product_evaluator< Product< Lhs, Rhs, DefaultProduct >, ProductTag, DiagonalShape, SparseShape >
 
struct  product_evaluator< Product< Lhs, Rhs, DefaultProduct >, ProductTag, SparseShape, DiagonalShape >
 
struct  product_evaluator< Product< Lhs, Rhs, LazyProduct >, ProductTag, DenseShape, DenseShape >
 
struct  product_evaluator< Product< Lhs, Rhs, LazyProduct >, ProductTag, DenseShape, HomogeneousShape >
 
struct  product_evaluator< Product< Lhs, Rhs, LazyProduct >, ProductTag, HomogeneousShape, DenseShape >
 
struct  product_evaluator< Product< Lhs, Rhs, Options >, ProductTag, LhsShape, RhsShape >
 
struct  product_evaluator< Product< Lhs, Rhs, ProductKind >, ProductTag, DenseShape, DiagonalShape >
 
struct  product_evaluator< Product< Lhs, Rhs, ProductKind >, ProductTag, DiagonalShape, DenseShape >
 
struct  product_evaluator< Product< Lhs, RhsView, DefaultProduct >, ProductTag, SparseShape, SparseSelfAdjointShape >
 
struct  product_evaluator< Product< LhsView, Rhs, DefaultProduct >, ProductTag, SparseSelfAdjointShape, SparseShape >
 
struct  product_promote_storage_type
 
struct  product_promote_storage_type< A, A, ProductTag >
 
struct  product_promote_storage_type< A, Dense, ProductTag >
 
struct  product_promote_storage_type< A, DiagonalShape, ProductTag >
 
struct  product_promote_storage_type< A, PermutationStorage, ProductTag >
 
struct  product_promote_storage_type< Dense, B, ProductTag >
 
struct  product_promote_storage_type< Dense, Dense, ProductTag >
 
struct  product_promote_storage_type< Dense, DiagonalShape, ProductTag >
 
struct  product_promote_storage_type< Dense, PermutationStorage, ProductTag >
 
struct  product_promote_storage_type< Dense, Sparse, OuterProduct >
 
struct  product_promote_storage_type< DiagonalShape, B, ProductTag >
 
struct  product_promote_storage_type< DiagonalShape, Dense, ProductTag >
 
struct  product_promote_storage_type< PermutationStorage, B, ProductTag >
 
struct  product_promote_storage_type< PermutationStorage, Dense, ProductTag >
 
struct  product_promote_storage_type< PermutationStorage, Sparse, ProductTag >
 
struct  product_promote_storage_type< Sparse, Dense, OuterProduct >
 
struct  product_promote_storage_type< Sparse, PermutationStorage, ProductTag >
 
struct  product_selfadjoint_matrix
 
struct  product_selfadjoint_matrix< Scalar, Index, LhsStorageOrder, false, ConjugateLhs, RhsStorageOrder, true, ConjugateRhs, ColMajor >
 
struct  product_selfadjoint_matrix< Scalar, Index, LhsStorageOrder, LhsSelfAdjoint, ConjugateLhs, RhsStorageOrder, RhsSelfAdjoint, ConjugateRhs, RowMajor >
 
struct  product_selfadjoint_matrix< Scalar, Index, LhsStorageOrder, true, ConjugateLhs, RhsStorageOrder, false, ConjugateRhs, ColMajor >
 
struct  product_size_category
 
struct  product_triangular_matrix_matrix
 
struct  product_triangular_matrix_matrix< Scalar, Index, Mode, false, LhsStorageOrder, ConjugateLhs, RhsStorageOrder, ConjugateRhs, ColMajor, Version >
 
struct  product_triangular_matrix_matrix< Scalar, Index, Mode, LhsIsTriangular, LhsStorageOrder, ConjugateLhs, RhsStorageOrder, ConjugateRhs, RowMajor, Version >
 
struct  product_triangular_matrix_matrix< Scalar, Index, Mode, true, LhsStorageOrder, ConjugateLhs, RhsStorageOrder, ConjugateRhs, ColMajor, Version >
 
struct  product_triangular_matrix_matrix_trmm
 
struct  product_type
 
struct  product_type_selector
 
struct  product_type_selector< 1, 1, 1 >
 
struct  product_type_selector< 1, 1, Depth >
 
struct  product_type_selector< 1, Large, Large >
 
struct  product_type_selector< 1, Large, Small >
 
struct  product_type_selector< 1, N, 1 >
 
struct  product_type_selector< 1, Small, Large >
 
struct  product_type_selector< 1, Small, Small >
 
struct  product_type_selector< Large, 1, Large >
 
struct  product_type_selector< Large, 1, Small >
 
struct  product_type_selector< Large, Large, Large >
 
struct  product_type_selector< Large, Large, Small >
 
struct  product_type_selector< Large, Small, 1 >
 
struct  product_type_selector< Large, Small, Large >
 
struct  product_type_selector< Large, Small, Small >
 
struct  product_type_selector< M, 1, 1 >
 
struct  product_type_selector< M, N, 1 >
 
struct  product_type_selector< Small, 1, Large >
 
struct  product_type_selector< Small, 1, Small >
 
struct  product_type_selector< Small, Large, 1 >
 
struct  product_type_selector< Small, Large, Large >
 
struct  product_type_selector< Small, Large, Small >
 
struct  product_type_selector< Small, Small, 1 >
 
struct  product_type_selector< Small, Small, Large >
 
struct  product_type_selector< Small, Small, Small >
 
struct  projective_transform_inverse
 
struct  projective_transform_inverse< TransformType, Projective >
 
struct  promote_index_type
 
struct  promote_scalar_arg
 
struct  promote_scalar_arg< S, T, false >
 
struct  promote_scalar_arg< S, T, true >
 
struct  promote_scalar_arg_unsupported
 
struct  promote_scalar_arg_unsupported< ExprScalar, T, PromotedType, false, true >
 
struct  promote_scalar_arg_unsupported< S, T, PromotedType, ConvertibleToLiteral, false >
 
struct  promote_scalar_arg_unsupported< S, T, PromotedType, true, true >
 
struct  promote_scalar_arg_unsupported< S, T, S, false, true >
 
struct  promote_storage_type
 
struct  promote_storage_type< A, A >
 
struct  promote_storage_type< A, const A >
 
struct  promote_storage_type< const A, A >
 
struct  qr_preconditioner_impl
 
class  qr_preconditioner_impl< MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true >
 
class  qr_preconditioner_impl< MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true >
 
class  qr_preconditioner_impl< MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true >
 
class  qr_preconditioner_impl< MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true >
 
class  qr_preconditioner_impl< MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true >
 
class  qr_preconditioner_impl< MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true >
 
class  qr_preconditioner_impl< MatrixType, QRPreconditioner, Case, false >
 
struct  qr_preconditioner_should_do_anything
 
struct  quat_conj
 
struct  quat_conj< Architecture::SSE, Derived, double >
 
struct  quat_conj< Architecture::SSE, Derived, float >
 
struct  quat_product
 
struct  quat_product< Architecture::SSE, Derived, OtherDerived, double >
 
struct  quat_product< Architecture::SSE, Derived, OtherDerived, float >
 
struct  quaternionbase_assign_impl
 
struct  quaternionbase_assign_impl< Other, 3, 3 >
 
struct  quaternionbase_assign_impl< Other, 4, 1 >
 
struct  random_default_impl
 
struct  random_default_impl< half, false, false >
 
struct  random_default_impl< Scalar, false, false >
 
struct  random_default_impl< Scalar, false, true >
 
struct  random_default_impl< Scalar, true, false >
 
struct  random_impl
 
struct  random_impl< bool >
 
struct  random_retval
 
struct  rcond_compute_sign
 
struct  rcond_compute_sign< Vector, Vector, false >
 
struct  real_default_impl
 
struct  real_default_impl< Scalar, true >
 
struct  real_impl
 
struct  real_ref_impl
 
struct  real_ref_retval
 
struct  real_retval
 
class  redux_evaluator
 
struct  redux_impl
 
struct  redux_impl< Func, Derived, DefaultTraversal, CompleteUnrolling >
 
struct  redux_impl< Func, Derived, DefaultTraversal, NoUnrolling >
 
struct  redux_impl< Func, Derived, LinearVectorizedTraversal, CompleteUnrolling >
 
struct  redux_impl< Func, Derived, LinearVectorizedTraversal, NoUnrolling >
 
struct  redux_impl< Func, Derived, SliceVectorizedTraversal, Unrolling >
 
struct  redux_novec_unroller
 
struct  redux_novec_unroller< Func, Derived, Start, 0 >
 
struct  redux_novec_unroller< Func, Derived, Start, 1 >
 
struct  redux_traits
 
struct  redux_vec_unroller
 
struct  redux_vec_unroller< Func, Derived, Start, 1 >
 
struct  ref_selector
 
struct  remove_all
 
struct  remove_all< const T >
 
struct  remove_all< T & >
 
struct  remove_all< T * >
 
struct  remove_all< T const & >
 
struct  remove_all< T const * >
 
struct  remove_const
 
struct  remove_const< const T >
 
struct  remove_const< const T[]>
 
struct  remove_const< const T[Size]>
 
struct  remove_pointer
 
struct  remove_pointer< T * >
 
struct  remove_pointer< T *const >
 
struct  remove_reference
 
struct  remove_reference< T & >
 
struct  result_of
 
struct  result_of< Func(ArgType)>
 
struct  result_of< Func(ArgType0, ArgType1)>
 
struct  result_of< Func(ArgType0, ArgType1, ArgType2)>
 
struct  result_of< scalar_cmp_op< LhsScalar, RhsScalar, Cmp >(LhsScalar, RhsScalar)>
 
struct  reverse_packet_cond
 
struct  reverse_packet_cond< PacketType, false >
 
struct  rotation_base_generic_product_selector
 
struct  rotation_base_generic_product_selector< RotationDerived, DiagonalMatrix< Scalar, Dim, MaxDim >, false >
 
struct  rotation_base_generic_product_selector< RotationDerived, MatrixType, false >
 
struct  rotation_base_generic_product_selector< RotationDerived, OtherVectorType, true >
 
struct  round_impl
 
struct  round_retval
 
struct  scalar_abs2_op
 
struct  scalar_abs_op
 
struct  scalar_acos_op
 
struct  scalar_arg_op
 
struct  scalar_asin_op
 
struct  scalar_atan_op
 
struct  scalar_betainc_op
 
struct  scalar_boolean_and_op
 
struct  scalar_boolean_not_op
 
struct  scalar_boolean_or_op
 
struct  scalar_boolean_xor_op
 
struct  scalar_cast_op
 
struct  scalar_cast_op< Eigen::half, float >
 
struct  scalar_cast_op< float, Eigen::half >
 
struct  scalar_cast_op< int, Eigen::half >
 
struct  scalar_ceil_op
 
struct  scalar_cmp_op
 
struct  scalar_cmp_op< LhsScalar, RhsScalar, cmp_EQ >
 
struct  scalar_cmp_op< LhsScalar, RhsScalar, cmp_GE >
 
struct  scalar_cmp_op< LhsScalar, RhsScalar, cmp_GT >
 
struct  scalar_cmp_op< LhsScalar, RhsScalar, cmp_LE >
 
struct  scalar_cmp_op< LhsScalar, RhsScalar, cmp_LT >
 
struct  scalar_cmp_op< LhsScalar, RhsScalar, cmp_NEQ >
 
struct  scalar_cmp_op< LhsScalar, RhsScalar, cmp_UNORD >
 
struct  scalar_conj_product_op
 
struct  scalar_conjugate_op
 
struct  scalar_constant_op
 
struct  scalar_cos_op
 
struct  scalar_cosh_op
 
struct  scalar_cube_op
 
struct  scalar_difference_op
 
struct  scalar_digamma_op
 
struct  scalar_div_cost
 
struct  scalar_div_cost< double, true >
 
struct  scalar_div_cost< float, true >
 
struct  scalar_div_cost< signed long, Vectorized, typename conditional< sizeof(long)==8, void, false_type >::type >
 
struct  scalar_div_cost< std::complex< T >, Vectorized >
 
struct  scalar_div_cost< unsigned long, Vectorized, typename conditional< sizeof(long)==8, void, false_type >::type >
 
struct  scalar_erf_op
 
struct  scalar_erfc_op
 
struct  scalar_exp_op
 
struct  scalar_floor_op
 
struct  scalar_fuzzy_default_impl
 
struct  scalar_fuzzy_default_impl< Scalar, false, false >
 
struct  scalar_fuzzy_default_impl< Scalar, false, true >
 
struct  scalar_fuzzy_default_impl< Scalar, true, false >
 
struct  scalar_fuzzy_impl
 
struct  scalar_fuzzy_impl< bool >
 
struct  scalar_hypot_op
 
struct  scalar_hypot_op< Scalar, Scalar >
 
struct  scalar_identity_op
 
struct  scalar_igamma_op
 
struct  scalar_igammac_op
 
struct  scalar_imag_op
 
struct  scalar_imag_ref_op
 
struct  scalar_inverse_op
 
struct  scalar_isfinite_op
 
struct  scalar_isinf_op
 
struct  scalar_isnan_op
 
struct  scalar_lgamma_op
 
struct  scalar_log10_op
 
struct  scalar_log1p_op
 
struct  scalar_log_op
 
struct  scalar_max_op
 
struct  scalar_min_op
 
struct  scalar_opposite_op
 
struct  scalar_pow_op
 
struct  scalar_product_op
 
struct  scalar_product_traits
 
struct  scalar_quotient_op
 
struct  scalar_random_op
 
struct  scalar_real_op
 
struct  scalar_real_ref_op
 
struct  scalar_round_op
 
struct  scalar_rsqrt_op
 
struct  scalar_score_coeff_op
 
struct  scalar_sign_op
 
struct  scalar_sign_op< Scalar, false >
 
struct  scalar_sign_op< Scalar, true >
 
struct  scalar_sin_op
 
struct  scalar_sinh_op
 
struct  scalar_sqrt_op
 
struct  scalar_square_op
 
struct  scalar_sum_op
 
struct  scalar_sum_op< bool, bool >
 
struct  scalar_tan_op
 
struct  scalar_tanh_op
 
struct  scalar_zeta_op
 
class  scoped_array
 
struct  Selector
 
struct  selfadjoint_matrix_vector_product
 
struct  selfadjoint_matrix_vector_product_symv
 
struct  selfadjoint_product_impl
 
struct  selfadjoint_product_impl< Lhs, 0, true, Rhs, RhsMode, false >
 
struct  selfadjoint_product_impl< Lhs, LhsMode, false, Rhs, 0, true >
 
struct  selfadjoint_product_impl< Lhs, LhsMode, false, Rhs, RhsMode, false >
 
struct  selfadjoint_rank2_update_selector
 
struct  selfadjoint_rank2_update_selector< Scalar, Index, UType, VType, Lower >
 
struct  selfadjoint_rank2_update_selector< Scalar, Index, UType, VType, Upper >
 
struct  setIdentity_impl
 
struct  setIdentity_impl< Derived, true >
 
struct  significant_decimals_impl
 
struct  simplicial_cholesky_grab_input
 
struct  simplicial_cholesky_grab_input< MatrixType, MatrixType >
 
struct  size_at_compile_time
 
struct  size_of_xpr_at_compile_time
 
struct  smart_copy_helper
 
struct  smart_copy_helper< T, false >
 
struct  smart_copy_helper< T, true >
 
struct  smart_memmove_helper
 
struct  smart_memmove_helper< T, false >
 
struct  smart_memmove_helper< T, true >
 
struct  solve_traits
 
struct  solve_traits< Decomposition, RhsType, Dense >
 
struct  solve_traits< Decomposition, RhsType, Sparse >
 
struct  Sparse2Dense
 
struct  Sparse2Sparse
 
struct  sparse_conjunction_evaluator
 
struct  sparse_conjunction_evaluator< XprType, IndexBased, IteratorBased >
 
struct  sparse_conjunction_evaluator< XprType, IteratorBased, IndexBased >
 
struct  sparse_conjunction_evaluator< XprType, IteratorBased, IteratorBased >
 
struct  sparse_dense_outer_product_evaluator
 
struct  sparse_diagonal_product_evaluator
 
struct  sparse_diagonal_product_evaluator< SparseXprType, DiagCoeffType, SDP_AsCwiseProduct >
 
struct  sparse_diagonal_product_evaluator< SparseXprType, DiagonalCoeffType, SDP_AsScalarProduct >
 
struct  sparse_eval
 
struct  sparse_eval< T, 1, 1, Flags >
 
struct  sparse_eval< T, 1, Cols, Flags >
 
struct  sparse_eval< T, Rows, 1, Flags >
 
class  sparse_matrix_block_impl
 
struct  sparse_solve_triangular_selector
 
struct  sparse_solve_triangular_selector< Lhs, Rhs, Mode, Lower, ColMajor >
 
struct  sparse_solve_triangular_selector< Lhs, Rhs, Mode, Lower, RowMajor >
 
struct  sparse_solve_triangular_selector< Lhs, Rhs, Mode, Upper, ColMajor >
 
struct  sparse_solve_triangular_selector< Lhs, Rhs, Mode, Upper, RowMajor >
 
struct  sparse_solve_triangular_sparse_selector
 
struct  sparse_solve_triangular_sparse_selector< Lhs, Rhs, Mode, UpLo, ColMajor >
 
struct  sparse_sparse_product_with_pruning_selector
 
struct  sparse_sparse_product_with_pruning_selector< Lhs, Rhs, ResultType, ColMajor, ColMajor, ColMajor >
 
struct  sparse_sparse_product_with_pruning_selector< Lhs, Rhs, ResultType, ColMajor, ColMajor, RowMajor >
 
struct  sparse_sparse_product_with_pruning_selector< Lhs, Rhs, ResultType, ColMajor, RowMajor, ColMajor >
 
struct  sparse_sparse_product_with_pruning_selector< Lhs, Rhs, ResultType, ColMajor, RowMajor, RowMajor >
 
struct  sparse_sparse_product_with_pruning_selector< Lhs, Rhs, ResultType, RowMajor, ColMajor, ColMajor >
 
struct  sparse_sparse_product_with_pruning_selector< Lhs, Rhs, ResultType, RowMajor, ColMajor, RowMajor >
 
struct  sparse_sparse_product_with_pruning_selector< Lhs, Rhs, ResultType, RowMajor, RowMajor, ColMajor >
 
struct  sparse_sparse_product_with_pruning_selector< Lhs, Rhs, ResultType, RowMajor, RowMajor, RowMajor >
 
struct  sparse_sparse_to_dense_product_selector
 
struct  sparse_sparse_to_dense_product_selector< Lhs, Rhs, ResultType, ColMajor, ColMajor >
 
struct  sparse_sparse_to_dense_product_selector< Lhs, Rhs, ResultType, ColMajor, RowMajor >
 
struct  sparse_sparse_to_dense_product_selector< Lhs, Rhs, ResultType, RowMajor, ColMajor >
 
struct  sparse_sparse_to_dense_product_selector< Lhs, Rhs, ResultType, RowMajor, RowMajor >
 
struct  sparse_time_dense_product_impl
 
struct  sparse_time_dense_product_impl< SparseLhsType, DenseRhsType, DenseResType, AlphaType, ColMajor, true >
 
struct  sparse_time_dense_product_impl< SparseLhsType, DenseRhsType, DenseResType, typename DenseResType::Scalar, ColMajor, false >
 
struct  sparse_time_dense_product_impl< SparseLhsType, DenseRhsType, DenseResType, typename DenseResType::Scalar, RowMajor, false >
 
struct  sparse_time_dense_product_impl< SparseLhsType, DenseRhsType, DenseResType, typename DenseResType::Scalar, RowMajor, true >
 
struct  sparse_vector_assign_selector
 
struct  sparse_vector_assign_selector< Dest, Src, SVA_Inner >
 
struct  sparse_vector_assign_selector< Dest, Src, SVA_Outer >
 
struct  sparse_vector_assign_selector< Dest, Src, SVA_RuntimeSwitch >
 
class  SparseLUImpl
 
class  SparseRefBase
 
struct  SparseSelfAdjoint2Sparse
 
struct  SparseSelfAdjointShape
 
class  SparseTransposeImpl
 
class  SparseTransposeImpl< MatrixType, CompressedAccessBit >
 
struct  SparseTriangularShape
 
struct  static_assertion
 
struct  static_assertion< true >
 
struct  stem_function
 
struct  storage_kind_to_evaluator_kind
 
struct  storage_kind_to_evaluator_kind< Sparse >
 
struct  storage_kind_to_shape
 
struct  storage_kind_to_shape< Dense >
 
struct  storage_kind_to_shape< DiagonalShape >
 
struct  storage_kind_to_shape< PermutationStorage >
 
struct  storage_kind_to_shape< SolverStorage >
 
struct  storage_kind_to_shape< Sparse >
 
struct  storage_kind_to_shape< TranspositionsStorage >
 
struct  sub_assign_op
 
struct  svd_precondition_2x2_block_to_be_real
 
struct  svd_precondition_2x2_block_to_be_real< MatrixType, QRPreconditioner, false >
 
struct  svd_precondition_2x2_block_to_be_real< MatrixType, QRPreconditioner, true >
 
struct  swap_assign_op
 
struct  symm_pack_lhs
 
struct  symm_pack_rhs
 
struct  take_matrix_for_product
 
struct  take_matrix_for_product< Transform< Scalar, Dim, Mode, Options > >
 
struct  take_matrix_for_product< Transform< Scalar, Dim, Projective, Options > >
 
struct  ternary_evaluator
 
struct  ternary_evaluator< CwiseTernaryOp< TernaryOp, Arg1, Arg2, Arg3 >, IndexBased, IndexBased >
 
struct  ternary_result_of_select
 
struct  ternary_result_of_select< Func, ArgType0, ArgType1, ArgType2, sizeof(has_std_result_type)>
 
struct  ternary_result_of_select< Func, ArgType0, ArgType1, ArgType2, sizeof(has_tr1_result)>
 
struct  traits
 
struct  traits< AngleAxis< _Scalar > >
 
struct  traits< Array< _Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols > >
 
struct  traits< ArrayWrapper< ExpressionType > >
 
struct  traits< BandMatrix< _Scalar, _Rows, _Cols, _Supers, _Subs, _Options > >
 
struct  traits< BandMatrixWrapper< _CoefficientsType, _Rows, _Cols, _Supers, _Subs, _Options > >
 
struct  traits< BDCSVD< _MatrixType > >
 
struct  traits< BiCGSTAB< _MatrixType, _Preconditioner > >
 
struct  traits< Block< XprType, BlockRows, BlockCols, InnerPanel > >
 
struct  traits< BlockSparseMatrix< _Scalar, _BlockAtCompileTime, _Options, _Index > >
 
struct  traits< BlockSparseMatrixView< BlockSparseMatrixT > >
 
struct  traits< BlockSparseTimeDenseProduct< BlockSparseMatrixT, VecType > >
 
struct  traits< ColPivHouseholderQR< _MatrixType > >
 
struct  traits< CompleteOrthogonalDecomposition< _MatrixType > >
 
struct  traits< ConjugateGradient< _MatrixType, _UpLo, _Preconditioner > >
 
struct  traits< const T >
 
struct  traits< CwiseBinaryOp< BinaryOp, Lhs, Rhs > >
 
struct  traits< CwiseNullaryOp< NullaryOp, PlainObjectType > >
 
struct  traits< CwiseTernaryOp< TernaryOp, Arg1, Arg2, Arg3 > >
 
struct  traits< CwiseUnaryOp< UnaryOp, XprType > >
 
struct  traits< CwiseUnaryView< ViewOp, MatrixType > >
 
struct  traits< Diagonal< const SparseMatrix< _Scalar, _Options, _StorageIndex >, DiagIndex > >
 
struct  traits< Diagonal< MatrixType, DiagIndex > >
 
struct  traits< Diagonal< SparseMatrix< _Scalar, _Options, _StorageIndex >, DiagIndex > >
 
struct  traits< DiagonalMatrix< _Scalar, SizeAtCompileTime, MaxSizeAtCompileTime > >
 
struct  traits< DiagonalWrapper< _DiagonalVectorType > >
 
struct  traits< DynamicSparseMatrix< _Scalar, _Options, _StorageIndex > >
 
struct  traits< EvalToTemp< ArgType > >
 
struct  traits< ForceAlignedAccess< ExpressionType > >
 
struct  traits< FullPivHouseholderQR< _MatrixType > >
 
struct  traits< FullPivHouseholderQRMatrixQReturnType< MatrixType > >
 
struct  traits< FullPivLU< _MatrixType > >
 
struct  traits< HessenbergDecompositionMatrixHReturnType< MatrixType > >
 
struct  traits< Homogeneous< MatrixType, Direction > >
 
struct  traits< homogeneous_left_product_impl< Homogeneous< MatrixType, Vertical >, Lhs > >
 
struct  traits< homogeneous_right_product_impl< Homogeneous< MatrixType, Horizontal >, Rhs > >
 
struct  traits< HouseholderSequence< VectorsType, CoeffsType, Side > >
 
struct  traits< image_retval_base< DecompositionType > >
 
struct  traits< Inverse< XprType > >
 
struct  traits< JacobiSVD< _MatrixType, QRPreconditioner > >
 
struct  traits< kernel_retval_base< DecompositionType > >
 
struct  traits< LeastSquaresConjugateGradient< _MatrixType, _Preconditioner > >
 
struct  traits< Map< const Quaternion< _Scalar >, _Options > >
 
struct  traits< Map< const SparseMatrix< MatScalar, MatOptions, MatIndex >, Options, StrideType > >
 
struct  traits< Map< PermutationMatrix< SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndex >, _PacketAccess > >
 
struct  traits< Map< PlainObjectType, MapOptions, StrideType > >
 
struct  traits< Map< Quaternion< _Scalar >, _Options > >
 
struct  traits< Map< SparseMatrix< MatScalar, MatOptions, MatIndex >, Options, StrideType > >
 
struct  traits< Map< Transpositions< SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndex >, _PacketAccess > >
 
struct  traits< MappedSparseMatrix< _Scalar, _Flags, _StorageIndex > >
 
struct  traits< Matrix< _Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols > >
 
struct  traits< MatrixWrapper< ExpressionType > >
 
struct  traits< NestByValue< ExpressionType > >
 
struct  traits< PartialPivLU< _MatrixType > >
 
struct  traits< PartialReduxExpr< MatrixType, MemberOp, Direction > >
 
struct  traits< PermutationMatrix< SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndex > >
 
struct  traits< PermutationWrapper< _IndicesType > >
 
struct  traits< Product< Lhs, Rhs, Option > >
 
struct  traits< Quaternion< _Scalar, _Options > >
 
struct  traits< Ref< _PlainObjectType, _Options, _StrideType > >
 
struct  traits< Ref< const SparseMatrix< MatScalar, MatOptions, MatIndex >, _Options, _StrideType > >
 
struct  traits< Ref< const SparseVector< MatScalar, MatOptions, MatIndex >, _Options, _StrideType > >
 
struct  traits< Ref< SparseMatrix< MatScalar, MatOptions, MatIndex >, _Options, _StrideType > >
 
struct  traits< Ref< SparseVector< MatScalar, MatOptions, MatIndex >, _Options, _StrideType > >
 
struct  traits< RefBase< Derived > >
 
struct  traits< Replicate< MatrixType, RowFactor, ColFactor > >
 
struct  traits< ReturnByValue< Derived > >
 
struct  traits< Reverse< MatrixType, Direction > >
 
struct  traits< Rotation2D< _Scalar > >
 
struct  traits< Select< ConditionMatrixType, ThenMatrixType, ElseMatrixType > >
 
struct  traits< SelfAdjointView< MatrixType, UpLo > >
 
struct  traits< SimplicialCholesky< _MatrixType, _UpLo, _Ordering > >
 
struct  traits< SimplicialLDLT< _MatrixType, _UpLo, _Ordering > >
 
struct  traits< SimplicialLLT< _MatrixType, _UpLo, _Ordering > >
 
struct  traits< Solve< Decomposition, RhsType > >
 
struct  traits< SolveWithGuess< Decomposition, RhsType, GuessType > >
 
struct  traits< SparseCompressedBase< Derived > >
 
struct  traits< SparseMatrix< _Scalar, _Options, _StorageIndex > >
 
struct  traits< SparseQR_QProduct< SparseQRType, Derived > >
 
struct  traits< SparseQRMatrixQReturnType< SparseQRType > >
 
struct  traits< SparseQRMatrixQTransposeReturnType< SparseQRType > >
 
struct  traits< SparseRefBase< Derived > >
 
struct  traits< SparseSelfAdjointView< MatrixType, Mode > >
 
struct  traits< SparseSymmetricPermutationProduct< MatrixType, Mode > >
 
struct  traits< SparseVector< _Scalar, _Options, _StorageIndex > >
 
struct  traits< SparseView< MatrixType > >
 
struct  traits< SPQR_QProduct< SPQRType, Derived > >
 
struct  traits< SPQRMatrixQReturnType< SPQRType > >
 
struct  traits< SPQRMatrixQTransposeReturnType< SPQRType > >
 
struct  traits< Transform< _Scalar, _Dim, _Mode, _Options > >
 
struct  traits< Transpose< MatrixType > >
 
struct  traits< Transpose< TranspositionsBase< Derived > > >
 
struct  traits< Transpositions< SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndex > >
 
struct  traits< TranspositionsWrapper< _IndicesType > >
 
struct  traits< triangular_solve_retval< Side, TriangularType, Rhs > >
 
struct  traits< TriangularView< MatrixType, _Mode > >
 
struct  traits< TridiagonalizationMatrixTReturnType< MatrixType > >
 
struct  traits< VectorBlock< VectorType, Size > >
 
struct  transfer_constness
 
struct  transform_construct_from_matrix
 
struct  transform_construct_from_matrix< Other, AffineCompact, Options, Dim, HDim, HDim, HDim >
 
struct  transform_construct_from_matrix< Other, Mode, Options, Dim, HDim, Dim, Dim >
 
struct  transform_construct_from_matrix< Other, Mode, Options, Dim, HDim, Dim, HDim >
 
struct  transform_construct_from_matrix< Other, Mode, Options, Dim, HDim, HDim, HDim >
 
struct  transform_left_product_impl
 
struct  transform_left_product_impl< Other, AffineCompact, Options, Dim, HDim, Dim, HDim >
 
struct  transform_left_product_impl< Other, AffineCompact, Options, Dim, HDim, HDim, HDim >
 
struct  transform_left_product_impl< Other, Mode, Options, Dim, HDim, Dim, Dim >
 
struct  transform_left_product_impl< Other, Mode, Options, Dim, HDim, Dim, HDim >
 
struct  transform_left_product_impl< Other, Mode, Options, Dim, HDim, HDim, HDim >
 
struct  transform_make_affine
 
struct  transform_make_affine< AffineCompact >
 
struct  transform_product_result
 
struct  transform_right_product_impl
 
struct  transform_right_product_impl< TransformType, MatrixType, 0, RhsCols >
 
struct  transform_right_product_impl< TransformType, MatrixType, 1, RhsCols >
 
struct  transform_right_product_impl< TransformType, MatrixType, 2, 1 >
 
struct  transform_right_product_impl< TransformType, MatrixType, 2, RhsCols >
 
struct  transform_take_affine_part
 
struct  transform_take_affine_part< Transform< Scalar, Dim, AffineCompact, Options > >
 
struct  transform_traits
 
struct  transform_transform_product_impl
 
struct  transform_transform_product_impl< Transform< Scalar, Dim, AffineCompact, LhsOptions >, Transform< Scalar, Dim, Projective, RhsOptions >, true >
 
struct  transform_transform_product_impl< Transform< Scalar, Dim, LhsMode, LhsOptions >, Transform< Scalar, Dim, RhsMode, RhsOptions >, false >
 
struct  transform_transform_product_impl< Transform< Scalar, Dim, LhsMode, LhsOptions >, Transform< Scalar, Dim, RhsMode, RhsOptions >, true >
 
struct  transform_transform_product_impl< Transform< Scalar, Dim, Projective, LhsOptions >, Transform< Scalar, Dim, AffineCompact, RhsOptions >, true >
 
struct  TransposeImpl_base
 
struct  TransposeImpl_base< MatrixType, false >
 
struct  transposition_matrix_product
 
struct  Triangular2Dense
 
struct  Triangular2Triangular
 
struct  triangular_assignment_loop
 
struct  triangular_assignment_loop< Kernel, Mode, 0, SetOpposite >
 
struct  triangular_assignment_loop< Kernel, Mode, Dynamic, SetOpposite >
 
class  triangular_dense_assignment_kernel
 
class  triangular_dense_assignment_kernel< UpLo, SelfAdjoint, SetOpposite, DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor, Version >
 
struct  triangular_matrix_vector_product
 
struct  triangular_matrix_vector_product< Index, Mode, LhsScalar, ConjLhs, RhsScalar, ConjRhs, ColMajor, Version >
 
struct  triangular_matrix_vector_product< Index, Mode, LhsScalar, ConjLhs, RhsScalar, ConjRhs, RowMajor, Version >
 
struct  triangular_matrix_vector_product_trmv
 
struct  triangular_product_impl
 
struct  triangular_product_impl< Mode, false, Lhs, true, Rhs, false >
 
struct  triangular_product_impl< Mode, LhsIsTriangular, Lhs, false, Rhs, false >
 
struct  triangular_product_impl< Mode, true, Lhs, false, Rhs, true >
 
struct  triangular_solve_matrix
 
struct  triangular_solve_matrix< Scalar, Index, OnTheLeft, Mode, Conjugate, TriStorageOrder, ColMajor >
 
struct  triangular_solve_matrix< Scalar, Index, OnTheRight, Mode, Conjugate, TriStorageOrder, ColMajor >
 
struct  triangular_solve_matrix< Scalar, Index, Side, Mode, Conjugate, TriStorageOrder, RowMajor >
 
struct  triangular_solve_retval
 
struct  triangular_solve_vector
 
struct  triangular_solve_vector< LhsScalar, RhsScalar, Index, OnTheLeft, Mode, Conjugate, ColMajor >
 
struct  triangular_solve_vector< LhsScalar, RhsScalar, Index, OnTheLeft, Mode, Conjugate, RowMajor >
 
struct  triangular_solve_vector< LhsScalar, RhsScalar, Index, OnTheRight, Mode, Conjugate, StorageOrder >
 
struct  triangular_solver_selector
 
struct  triangular_solver_selector< Lhs, Rhs, OnTheLeft, Mode, CompleteUnrolling, 1 >
 
struct  triangular_solver_selector< Lhs, Rhs, OnTheRight, Mode, CompleteUnrolling, 1 >
 
struct  triangular_solver_selector< Lhs, Rhs, Side, Mode, NoUnrolling, 1 >
 
struct  triangular_solver_selector< Lhs, Rhs, Side, Mode, NoUnrolling, Dynamic >
 
struct  triangular_solver_unroller
 
struct  triangular_solver_unroller< Lhs, Rhs, Mode, LoopIndex, Size, false >
 
struct  triangular_solver_unroller< Lhs, Rhs, Mode, LoopIndex, Size, true >
 
struct  tribb_kernel
 
struct  tridiagonalization_inplace_selector
 
struct  tridiagonalization_inplace_selector< MatrixType, 1, IsComplex >
 
struct  tridiagonalization_inplace_selector< MatrixType, 3, false >
 
struct  TridiagonalizationMatrixTReturnType
 
class  TridiagonalMatrix
 Represents a tridiagonal matrix with a compact banded storage. More...
 
struct  TripletComp
 
struct  trmv_selector
 
struct  trmv_selector< Mode, ColMajor >
 
struct  trmv_selector< Mode, RowMajor >
 
class  trsolve_traits
 
struct  true_type
 
struct  type_casting_traits
 
struct  type_casting_traits< double, float >
 
struct  type_casting_traits< float, double >
 
struct  type_casting_traits< float, int >
 
struct  type_casting_traits< int, float >
 
struct  umeyama_transform_matrix_type
 
struct  unaligned_dense_assignment_loop
 
struct  unaligned_dense_assignment_loop< false >
 
struct  unary_evaluator
 
struct  unary_evaluator< ArrayWrapper< TArgType > >
 
struct  unary_evaluator< Block< ArgType, BlockRows, BlockCols, InnerPanel >, IndexBased >
 
struct  unary_evaluator< Block< ArgType, BlockRows, BlockCols, InnerPanel >, IteratorBased >
 
struct  unary_evaluator< Block< const SparseMatrix< _Scalar, _Options, _StorageIndex >, BlockRows, BlockCols, true >, IteratorBased >
 
struct  unary_evaluator< Block< SparseMatrix< _Scalar, _Options, _StorageIndex >, BlockRows, BlockCols, true >, IteratorBased >
 
struct  unary_evaluator< CwiseUnaryOp< UnaryOp, ArgType >, IndexBased >
 
struct  unary_evaluator< CwiseUnaryOp< UnaryOp, ArgType >, IteratorBased >
 
struct  unary_evaluator< CwiseUnaryView< UnaryOp, ArgType >, IndexBased >
 
struct  unary_evaluator< CwiseUnaryView< ViewOp, ArgType >, IteratorBased >
 
struct  unary_evaluator< Homogeneous< ArgType, Direction >, IndexBased >
 
struct  unary_evaluator< Inverse< ArgType > >
 
struct  unary_evaluator< MatrixWrapper< TArgType > >
 
struct  unary_evaluator< Replicate< ArgType, RowFactor, ColFactor > >
 
struct  unary_evaluator< Reverse< ArgType, Direction > >
 
struct  unary_evaluator< SparseView< ArgType >, IndexBased >
 
struct  unary_evaluator< SparseView< ArgType >, IteratorBased >
 
struct  unary_evaluator< SparseView< Product< Lhs, Rhs, Options > >, IteratorBased >
 
struct  unary_evaluator< Transpose< ArgType >, IndexBased >
 
struct  unary_evaluator< Transpose< ArgType >, IteratorBased >
 
struct  unary_evaluator< TriangularView< ArgType, Mode >, IteratorBased >
 
struct  unary_evaluator< TriangularView< MatrixType, Mode >, IndexBased >
 
struct  unary_result_of_select
 
struct  unary_result_of_select< Func, ArgType, sizeof(has_std_result_type)>
 
struct  unary_result_of_select< Func, ArgType, sizeof(has_tr1_result)>
 
struct  unitOrthogonal_selector
 
struct  unitOrthogonal_selector< Derived, 2 >
 
struct  unitOrthogonal_selector< Derived, 3 >
 
struct  unpacket_traits
 
struct  unpacket_traits< DoublePacket< Packet > >
 
struct  unpacket_traits< Packet16f >
 
struct  unpacket_traits< Packet16i >
 
struct  unpacket_traits< Packet1cd >
 
struct  unpacket_traits< Packet2cd >
 
struct  unpacket_traits< Packet2cf >
 
struct  unpacket_traits< Packet2d >
 
struct  unpacket_traits< Packet4cf >
 
struct  unpacket_traits< Packet4d >
 
struct  unpacket_traits< Packet4f >
 
struct  unpacket_traits< Packet4i >
 
struct  unpacket_traits< Packet8d >
 
struct  unpacket_traits< Packet8f >
 
struct  unpacket_traits< Packet8i >
 
class  UpperBidiagonalization
 
class  variable_if_dynamic
 
class  variable_if_dynamic< T, Dynamic >
 
class  variable_if_dynamicindex
 
class  variable_if_dynamicindex< T, DynamicIndex >
 
struct  vectorwise_reverse_inplace_impl
 
struct  vectorwise_reverse_inplace_impl< Horizontal >
 
struct  vectorwise_reverse_inplace_impl< Vertical >
 
class  visitor_evaluator
 
struct  visitor_impl
 
struct  visitor_impl< Visitor, Derived, 1 >
 
struct  visitor_impl< Visitor, Derived, Dynamic >
 
class  vml_assign_traits
 

Typedefs

typedef __vector float Packet4f
 
typedef __vector int Packet4i
 
typedef __vector unsigned int Packet4ui
 
typedef __vector __bool int Packet4bi
 
typedef __vector short int Packet8i
 
typedef __vector unsigned char Packet16uc
 
typedef __m256 Packet8f
 
typedef __m256d Packet4d
 
typedef __m512 Packet16f
 
typedef __m512i Packet16i
 
typedef __m512d Packet8d
 
typedef float32x2_t Packet2f
 
typedef int32x2_t Packet2i
 
typedef __m128d Packet2d
 
typedef const char * SsePrefetchPtrType
 
typedef __vector unsigned long long Packet2ul
 
typedef __vector long long Packet2l
 
typedef std::ptrdiff_t IntPtr
 
typedef std::size_t UIntPtr
 

Enumerations

enum  SignMatrix { PositiveSemiDef , NegativeSemiDef , ZeroSign , Indefinite }
 
enum  { meta_floor_log2_terminate , meta_floor_log2_move_up , meta_floor_log2_move_down , meta_floor_log2_bogus }
 
enum  PermPermProduct_t { PermPermProduct }
 
enum  ComparisonName {
  cmp_EQ = 0 , cmp_LT = 1 , cmp_LE = 2 , cmp_UNORD = 3 ,
  cmp_NEQ = 4 , cmp_GT = 5 , cmp_GE = 6
}
 
enum  { SDP_AsScalarProduct , SDP_AsCwiseProduct }
 
enum  { SVA_RuntimeSwitch , SVA_Inner , SVA_Outer }
 
enum  { LUNoMarker = 3 }
 
enum  { emptyIdxLU = -1 }
 
enum  MemType {
  LUSUP , UCOL , LSUB , USUB ,
  LLVL , ULVL
}
 
enum  { PreconditionIfMoreColsThanRows , PreconditionIfMoreRowsThanCols }
 

Functions

template<typename MatrixType , typename VectorType >
static Index llt_rank_update_lower (MatrixType &mat, const VectorType &vec, const typename MatrixType::RealScalar &sigma)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pset1< Packet2cf > (const std::complex< float > &from)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pload< Packet2cf > (const std::complex< float > *from)
 
template<>
EIGEN_STRONG_INLINE Packet2cf ploadu< Packet2cf > (const std::complex< float > *from)
 
template<>
EIGEN_STRONG_INLINE Packet2cf ploaddup< Packet2cf > (const std::complex< float > *from)
 
template<>
EIGEN_STRONG_INLINE void pstore< std::complex< float > > (std::complex< float > *to, const Packet2cf &from)
 
template<>
EIGEN_STRONG_INLINE void pstoreu< std::complex< float > > (std::complex< float > *to, const Packet2cf &from)
 
template<>
EIGEN_DEVICE_FUNC Packet2cf pgather< std::complex< float >, Packet2cf > (const std::complex< float > *from, Index stride)
 
template<>
EIGEN_DEVICE_FUNC void pscatter< std::complex< float >, Packet2cf > (std::complex< float > *to, const Packet2cf &from, Index stride)
 
template<>
EIGEN_STRONG_INLINE Packet2cf padd< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cf psub< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pnegate (const Packet2cf &a)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pconj (const Packet2cf &a)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pmul< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pand< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cf por< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pxor< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pandnot< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
 
template<>
EIGEN_STRONG_INLINE void prefetch< std::complex< float > > (const std::complex< float > *addr)
 
template<>
EIGEN_STRONG_INLINE std::complex< float > pfirst< Packet2cf > (const Packet2cf &a)
 
template<>
EIGEN_STRONG_INLINE Packet2cf preverse (const Packet2cf &a)
 
template<>
EIGEN_STRONG_INLINE std::complex< float > predux< Packet2cf > (const Packet2cf &a)
 
template<>
EIGEN_STRONG_INLINE Packet2cf preduxp< Packet2cf > (const Packet2cf *vecs)
 
template<>
EIGEN_STRONG_INLINE std::complex< float > predux_mul< Packet2cf > (const Packet2cf &a)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pdiv< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pcplxflip< Packet2cf > (const Packet2cf &x)
 
EIGEN_STRONG_INLINE void ptranspose (PacketBlock< Packet2cf, 2 > &kernel)
 
static _EIGEN_DECLARE_CONST_Packet4f (1, 1.0f)
 
static _EIGEN_DECLARE_CONST_Packet4f (half, 0.5f)
 
static _EIGEN_DECLARE_CONST_Packet4i (0x7f, 0x7f)
 
static _EIGEN_DECLARE_CONST_Packet4i (23, 23)
 
static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT (inv_mant_mask, ~0x7f800000)
 
static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT (min_norm_pos, 0x00800000)
 
static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT (minus_inf, 0xff800000)
 
static _EIGEN_DECLARE_CONST_Packet4f_FROM_INT (minus_nan, 0xffffffff)
 
static _EIGEN_DECLARE_CONST_Packet4f (cephes_SQRTHF, 0.707106781186547524f)
 
static _EIGEN_DECLARE_CONST_Packet4f (cephes_log_p0, 7.0376836292E-2f)
 
static _EIGEN_DECLARE_CONST_Packet4f (cephes_log_p1, - 1.1514610310E-1f)
 
static _EIGEN_DECLARE_CONST_Packet4f (cephes_log_p2, 1.1676998740E-1f)
 
static _EIGEN_DECLARE_CONST_Packet4f (cephes_log_p3, - 1.2420140846E-1f)
 
static _EIGEN_DECLARE_CONST_Packet4f (cephes_log_p4,+1.4249322787E-1f)
 
static _EIGEN_DECLARE_CONST_Packet4f (cephes_log_p5, - 1.6668057665E-1f)
 
static _EIGEN_DECLARE_CONST_Packet4f (cephes_log_p6,+2.0000714765E-1f)
 
static _EIGEN_DECLARE_CONST_Packet4f (cephes_log_p7, - 2.4999993993E-1f)
 
static _EIGEN_DECLARE_CONST_Packet4f (cephes_log_p8,+3.3333331174E-1f)
 
static _EIGEN_DECLARE_CONST_Packet4f (cephes_log_q1, -2.12194440e-4f)
 
static _EIGEN_DECLARE_CONST_Packet4f (cephes_log_q2, 0.693359375f)
 
static _EIGEN_DECLARE_CONST_Packet4f (exp_hi, 88.3762626647950f)
 
static _EIGEN_DECLARE_CONST_Packet4f (exp_lo, -88.3762626647949f)
 
static _EIGEN_DECLARE_CONST_Packet4f (cephes_LOG2EF, 1.44269504088896341f)
 
static _EIGEN_DECLARE_CONST_Packet4f (cephes_exp_C1, 0.693359375f)
 
static _EIGEN_DECLARE_CONST_Packet4f (cephes_exp_C2, -2.12194440e-4f)
 
static _EIGEN_DECLARE_CONST_Packet4f (cephes_exp_p0, 1.9875691500E-4f)
 
static _EIGEN_DECLARE_CONST_Packet4f (cephes_exp_p1, 1.3981999507E-3f)
 
static _EIGEN_DECLARE_CONST_Packet4f (cephes_exp_p2, 8.3334519073E-3f)
 
static _EIGEN_DECLARE_CONST_Packet4f (cephes_exp_p3, 4.1665795894E-2f)
 
static _EIGEN_DECLARE_CONST_Packet4f (cephes_exp_p4, 1.6666665459E-1f)
 
static _EIGEN_DECLARE_CONST_Packet4f (cephes_exp_p5, 5.0000001201E-1f)
 
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f plog< Packet4f > (const Packet4f &_x)
 
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f pexp< Packet4f > (const Packet4f &_x)
 
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f prsqrt< Packet4f > (const Packet4f &x)
 
static _EIGEN_DECLARE_CONST_FAST_Packet4f (ZERO, 0)
 
static _EIGEN_DECLARE_CONST_FAST_Packet4i (ZERO, 0)
 
static _EIGEN_DECLARE_CONST_FAST_Packet4i (ONE, 1)
 
static _EIGEN_DECLARE_CONST_FAST_Packet4i (MINUS16,-16)
 
static _EIGEN_DECLARE_CONST_FAST_Packet4i (MINUS1,-1)
 
std::ostream & operator<< (std::ostream &s, const Packet16uc &v)
 
std::ostream & operator<< (std::ostream &s, const Packet4f &v)
 
std::ostream & operator<< (std::ostream &s, const Packet4i &v)
 
std::ostream & operator<< (std::ostream &s, const Packet4ui &v)
 
template<>
EIGEN_STRONG_INLINE Packet4f pload< Packet4f > (const float *from)
 
template<>
EIGEN_STRONG_INLINE Packet4i pload< Packet4i > (const int *from)
 
template<>
EIGEN_STRONG_INLINE void pstore< float > (float *to, const Packet4f &from)
 
template<>
EIGEN_STRONG_INLINE void pstore< int > (int *to, const Packet4i &from)
 
template<>
EIGEN_STRONG_INLINE Packet4f pset1< Packet4f > (const float &from)
 
template<>
EIGEN_STRONG_INLINE Packet4i pset1< Packet4i > (const int &from)
 
template<>
EIGEN_STRONG_INLINE void pbroadcast4< Packet4f > (const float *a, Packet4f &a0, Packet4f &a1, Packet4f &a2, Packet4f &a3)
 
template<>
EIGEN_STRONG_INLINE void pbroadcast4< Packet4i > (const int *a, Packet4i &a0, Packet4i &a1, Packet4i &a2, Packet4i &a3)
 
template<>
EIGEN_DEVICE_FUNC Packet4f pgather< float, Packet4f > (const float *from, Index stride)
 
template<>
EIGEN_DEVICE_FUNC Packet4i pgather< int, Packet4i > (const int *from, Index stride)
 
template<>
EIGEN_DEVICE_FUNC void pscatter< float, Packet4f > (float *to, const Packet4f &from, Index stride)
 
template<>
EIGEN_DEVICE_FUNC void pscatter< int, Packet4i > (int *to, const Packet4i &from, Index stride)
 
template<>
EIGEN_STRONG_INLINE Packet4f plset< Packet4f > (const float &a)
 
template<>
EIGEN_STRONG_INLINE Packet4i plset< Packet4i > (const int &a)
 
template<>
EIGEN_STRONG_INLINE Packet4f padd< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i padd< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f psub< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i psub< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f pnegate (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE Packet4i pnegate (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE Packet4f pconj (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE Packet4i pconj (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE Packet4f pmul< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i pmul< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f pdiv< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i pdiv< Packet4i > (const Packet4i &, const Packet4i &)
 
template<>
EIGEN_STRONG_INLINE Packet4f pmadd (const Packet4f &a, const Packet4f &b, const Packet4f &c)
 
template<>
EIGEN_STRONG_INLINE Packet4i pmadd (const Packet4i &a, const Packet4i &b, const Packet4i &c)
 
template<>
EIGEN_STRONG_INLINE Packet4f pmin< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i pmin< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f pmax< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i pmax< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f pand< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i pand< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f por< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i por< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f pxor< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i pxor< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f pandnot< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i pandnot< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f pround< Packet4f > (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE Packet4f pceil< Packet4f > (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE Packet4f pfloor< Packet4f > (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE Packet4i ploadu< Packet4i > (const int *from)
 
template<>
EIGEN_STRONG_INLINE Packet4f ploadu< Packet4f > (const float *from)
 
template<>
EIGEN_STRONG_INLINE Packet4f ploaddup< Packet4f > (const float *from)
 
template<>
EIGEN_STRONG_INLINE Packet4i ploaddup< Packet4i > (const int *from)
 
template<>
EIGEN_STRONG_INLINE void pstoreu< int > (int *to, const Packet4i &from)
 
template<>
EIGEN_STRONG_INLINE void pstoreu< float > (float *to, const Packet4f &from)
 
template<>
EIGEN_STRONG_INLINE void prefetch< float > (const float *addr)
 
template<>
EIGEN_STRONG_INLINE void prefetch< int > (const int *addr)
 
template<>
EIGEN_STRONG_INLINE float pfirst< Packet4f > (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE int pfirst< Packet4i > (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE Packet4f preverse (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE Packet4i preverse (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE Packet4f pabs (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE Packet4i pabs (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE float predux< Packet4f > (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE Packet4f preduxp< Packet4f > (const Packet4f *vecs)
 
template<>
EIGEN_STRONG_INLINE int predux< Packet4i > (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE Packet4i preduxp< Packet4i > (const Packet4i *vecs)
 
template<>
EIGEN_STRONG_INLINE float predux_mul< Packet4f > (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE int predux_mul< Packet4i > (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE float predux_min< Packet4f > (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE int predux_min< Packet4i > (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE float predux_max< Packet4f > (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE int predux_max< Packet4i > (const Packet4i &a)
 
EIGEN_DEVICE_FUNC void ptranspose (PacketBlock< Packet4f, 4 > &kernel)
 
EIGEN_DEVICE_FUNC void ptranspose (PacketBlock< Packet4i, 4 > &kernel)
 
template<>
EIGEN_STRONG_INLINE Packet4i pblend (const Selector< 4 > &ifPacket, const Packet4i &thenPacket, const Packet4i &elsePacket)
 
template<>
EIGEN_STRONG_INLINE Packet4f pblend (const Selector< 4 > &ifPacket, const Packet4f &thenPacket, const Packet4f &elsePacket)
 
template<>
EIGEN_STRONG_INLINE Packet4cf padd< Packet4cf > (const Packet4cf &a, const Packet4cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet4cf psub< Packet4cf > (const Packet4cf &a, const Packet4cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet4cf pnegate (const Packet4cf &a)
 
template<>
EIGEN_STRONG_INLINE Packet4cf pconj (const Packet4cf &a)
 
template<>
EIGEN_STRONG_INLINE Packet4cf pmul< Packet4cf > (const Packet4cf &a, const Packet4cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet4cf pand< Packet4cf > (const Packet4cf &a, const Packet4cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet4cf por< Packet4cf > (const Packet4cf &a, const Packet4cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet4cf pxor< Packet4cf > (const Packet4cf &a, const Packet4cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet4cf pandnot< Packet4cf > (const Packet4cf &a, const Packet4cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet4cf pload< Packet4cf > (const std::complex< float > *from)
 
template<>
EIGEN_STRONG_INLINE Packet4cf ploadu< Packet4cf > (const std::complex< float > *from)
 
template<>
EIGEN_STRONG_INLINE Packet4cf pset1< Packet4cf > (const std::complex< float > &from)
 
template<>
EIGEN_STRONG_INLINE Packet4cf ploaddup< Packet4cf > (const std::complex< float > *from)
 
template<>
EIGEN_STRONG_INLINE void pstore< std::complex< float > > (std::complex< float > *to, const Packet4cf &from)
 
template<>
EIGEN_STRONG_INLINE void pstoreu< std::complex< float > > (std::complex< float > *to, const Packet4cf &from)
 
template<>
EIGEN_DEVICE_FUNC Packet4cf pgather< std::complex< float >, Packet4cf > (const std::complex< float > *from, Index stride)
 
template<>
EIGEN_DEVICE_FUNC void pscatter< std::complex< float >, Packet4cf > (std::complex< float > *to, const Packet4cf &from, Index stride)
 
template<>
EIGEN_STRONG_INLINE std::complex< float > pfirst< Packet4cf > (const Packet4cf &a)
 
template<>
EIGEN_STRONG_INLINE Packet4cf preverse (const Packet4cf &a)
 
template<>
EIGEN_STRONG_INLINE std::complex< float > predux< Packet4cf > (const Packet4cf &a)
 
template<>
EIGEN_STRONG_INLINE Packet4cf preduxp< Packet4cf > (const Packet4cf *vecs)
 
template<>
EIGEN_STRONG_INLINE std::complex< float > predux_mul< Packet4cf > (const Packet4cf &a)
 
template<>
EIGEN_STRONG_INLINE Packet4cf pdiv< Packet4cf > (const Packet4cf &a, const Packet4cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet4cf pcplxflip< Packet4cf > (const Packet4cf &x)
 
template<>
EIGEN_STRONG_INLINE Packet2cd padd< Packet2cd > (const Packet2cd &a, const Packet2cd &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cd psub< Packet2cd > (const Packet2cd &a, const Packet2cd &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cd pnegate (const Packet2cd &a)
 
template<>
EIGEN_STRONG_INLINE Packet2cd pconj (const Packet2cd &a)
 
template<>
EIGEN_STRONG_INLINE Packet2cd pmul< Packet2cd > (const Packet2cd &a, const Packet2cd &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cd pand< Packet2cd > (const Packet2cd &a, const Packet2cd &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cd por< Packet2cd > (const Packet2cd &a, const Packet2cd &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cd pxor< Packet2cd > (const Packet2cd &a, const Packet2cd &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cd pandnot< Packet2cd > (const Packet2cd &a, const Packet2cd &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cd pload< Packet2cd > (const std::complex< double > *from)
 
template<>
EIGEN_STRONG_INLINE Packet2cd ploadu< Packet2cd > (const std::complex< double > *from)
 
template<>
EIGEN_STRONG_INLINE Packet2cd pset1< Packet2cd > (const std::complex< double > &from)
 
template<>
EIGEN_STRONG_INLINE Packet2cd ploaddup< Packet2cd > (const std::complex< double > *from)
 
template<>
EIGEN_STRONG_INLINE void pstore< std::complex< double > > (std::complex< double > *to, const Packet2cd &from)
 
template<>
EIGEN_STRONG_INLINE void pstoreu< std::complex< double > > (std::complex< double > *to, const Packet2cd &from)
 
template<>
EIGEN_DEVICE_FUNC Packet2cd pgather< std::complex< double >, Packet2cd > (const std::complex< double > *from, Index stride)
 
template<>
EIGEN_DEVICE_FUNC void pscatter< std::complex< double >, Packet2cd > (std::complex< double > *to, const Packet2cd &from, Index stride)
 
template<>
EIGEN_STRONG_INLINE std::complex< double > pfirst< Packet2cd > (const Packet2cd &a)
 
template<>
EIGEN_STRONG_INLINE Packet2cd preverse (const Packet2cd &a)
 
template<>
EIGEN_STRONG_INLINE std::complex< double > predux< Packet2cd > (const Packet2cd &a)
 
template<>
EIGEN_STRONG_INLINE Packet2cd preduxp< Packet2cd > (const Packet2cd *vecs)
 
template<>
EIGEN_STRONG_INLINE std::complex< double > predux_mul< Packet2cd > (const Packet2cd &a)
 
template<>
EIGEN_STRONG_INLINE Packet2cd pdiv< Packet2cd > (const Packet2cd &a, const Packet2cd &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cd pcplxflip< Packet2cd > (const Packet2cd &x)
 
EIGEN_DEVICE_FUNC void ptranspose (PacketBlock< Packet4cf, 4 > &kernel)
 
EIGEN_DEVICE_FUNC void ptranspose (PacketBlock< Packet2cd, 2 > &kernel)
 
template<>
EIGEN_STRONG_INLINE Packet4cf pinsertfirst (const Packet4cf &a, std::complex< float > b)
 
template<>
EIGEN_STRONG_INLINE Packet2cd pinsertfirst (const Packet2cd &a, std::complex< double > b)
 
template<>
EIGEN_STRONG_INLINE Packet4cf pinsertlast (const Packet4cf &a, std::complex< float > b)
 
template<>
EIGEN_STRONG_INLINE Packet2cd pinsertlast (const Packet2cd &a, std::complex< double > b)
 
Packet8i pshiftleft (Packet8i v, int n)
 
Packet8f pshiftright (Packet8f v, int n)
 
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f psin< Packet8f > (const Packet8f &_x)
 
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f plog< Packet8f > (const Packet8f &_x)
 
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f pexp< Packet8f > (const Packet8f &_x)
 
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f ptanh< Packet8f > (const Packet8f &x)
 
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4d pexp< Packet4d > (const Packet4d &_x)
 
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f psqrt< Packet8f > (const Packet8f &x)
 
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4d psqrt< Packet4d > (const Packet4d &x)
 
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f prsqrt< Packet8f > (const Packet8f &x)
 
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4d prsqrt< Packet4d > (const Packet4d &x)
 
template<>
EIGEN_STRONG_INLINE Packet8f pset1< Packet8f > (const float &from)
 
template<>
EIGEN_STRONG_INLINE Packet4d pset1< Packet4d > (const double &from)
 
template<>
EIGEN_STRONG_INLINE Packet8i pset1< Packet8i > (const int &from)
 
template<>
EIGEN_STRONG_INLINE Packet8f pload1< Packet8f > (const float *from)
 
template<>
EIGEN_STRONG_INLINE Packet4d pload1< Packet4d > (const double *from)
 
template<>
EIGEN_STRONG_INLINE Packet8f plset< Packet8f > (const float &a)
 
template<>
EIGEN_STRONG_INLINE Packet4d plset< Packet4d > (const double &a)
 
template<>
EIGEN_STRONG_INLINE Packet8f padd< Packet8f > (const Packet8f &a, const Packet8f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4d padd< Packet4d > (const Packet4d &a, const Packet4d &b)
 
template<>
EIGEN_STRONG_INLINE Packet8f psub< Packet8f > (const Packet8f &a, const Packet8f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4d psub< Packet4d > (const Packet4d &a, const Packet4d &b)
 
template<>
EIGEN_STRONG_INLINE Packet8f pnegate (const Packet8f &a)
 
template<>
EIGEN_STRONG_INLINE Packet4d pnegate (const Packet4d &a)
 
template<>
EIGEN_STRONG_INLINE Packet8f pconj (const Packet8f &a)
 
template<>
EIGEN_STRONG_INLINE Packet4d pconj (const Packet4d &a)
 
template<>
EIGEN_STRONG_INLINE Packet8i pconj (const Packet8i &a)
 
template<>
EIGEN_STRONG_INLINE Packet8f pmul< Packet8f > (const Packet8f &a, const Packet8f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4d pmul< Packet4d > (const Packet4d &a, const Packet4d &b)
 
template<>
EIGEN_STRONG_INLINE Packet8f pdiv< Packet8f > (const Packet8f &a, const Packet8f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4d pdiv< Packet4d > (const Packet4d &a, const Packet4d &b)
 
template<>
EIGEN_STRONG_INLINE Packet8i pdiv< Packet8i > (const Packet8i &, const Packet8i &)
 
template<>
EIGEN_STRONG_INLINE Packet8f pmin< Packet8f > (const Packet8f &a, const Packet8f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4d pmin< Packet4d > (const Packet4d &a, const Packet4d &b)
 
template<>
EIGEN_STRONG_INLINE Packet8f pmax< Packet8f > (const Packet8f &a, const Packet8f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4d pmax< Packet4d > (const Packet4d &a, const Packet4d &b)
 
template<>
EIGEN_STRONG_INLINE Packet8f pround< Packet8f > (const Packet8f &a)
 
template<>
EIGEN_STRONG_INLINE Packet4d pround< Packet4d > (const Packet4d &a)
 
template<>
EIGEN_STRONG_INLINE Packet8f pceil< Packet8f > (const Packet8f &a)
 
template<>
EIGEN_STRONG_INLINE Packet4d pceil< Packet4d > (const Packet4d &a)
 
template<>
EIGEN_STRONG_INLINE Packet8f pfloor< Packet8f > (const Packet8f &a)
 
template<>
EIGEN_STRONG_INLINE Packet4d pfloor< Packet4d > (const Packet4d &a)
 
template<>
EIGEN_STRONG_INLINE Packet8f pand< Packet8f > (const Packet8f &a, const Packet8f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4d pand< Packet4d > (const Packet4d &a, const Packet4d &b)
 
template<>
EIGEN_STRONG_INLINE Packet8f por< Packet8f > (const Packet8f &a, const Packet8f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4d por< Packet4d > (const Packet4d &a, const Packet4d &b)
 
template<>
EIGEN_STRONG_INLINE Packet8f pxor< Packet8f > (const Packet8f &a, const Packet8f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4d pxor< Packet4d > (const Packet4d &a, const Packet4d &b)
 
template<>
EIGEN_STRONG_INLINE Packet8f pandnot< Packet8f > (const Packet8f &a, const Packet8f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4d pandnot< Packet4d > (const Packet4d &a, const Packet4d &b)
 
template<>
EIGEN_STRONG_INLINE Packet8f pload< Packet8f > (const float *from)
 
template<>
EIGEN_STRONG_INLINE Packet4d pload< Packet4d > (const double *from)
 
template<>
EIGEN_STRONG_INLINE Packet8i pload< Packet8i > (const int *from)
 
template<>
EIGEN_STRONG_INLINE Packet8f ploadu< Packet8f > (const float *from)
 
template<>
EIGEN_STRONG_INLINE Packet4d ploadu< Packet4d > (const double *from)
 
template<>
EIGEN_STRONG_INLINE Packet8i ploadu< Packet8i > (const int *from)
 
template<>
EIGEN_STRONG_INLINE Packet8f ploaddup< Packet8f > (const float *from)
 
template<>
EIGEN_STRONG_INLINE Packet4d ploaddup< Packet4d > (const double *from)
 
template<>
EIGEN_STRONG_INLINE Packet8f ploadquad< Packet8f > (const float *from)
 
template<>
EIGEN_STRONG_INLINE void pstore< float > (float *to, const Packet8f &from)
 
template<>
EIGEN_STRONG_INLINE void pstore< double > (double *to, const Packet4d &from)
 
template<>
EIGEN_STRONG_INLINE void pstore< int > (int *to, const Packet8i &from)
 
template<>
EIGEN_STRONG_INLINE void pstoreu< float > (float *to, const Packet8f &from)
 
template<>
EIGEN_STRONG_INLINE void pstoreu< double > (double *to, const Packet4d &from)
 
template<>
EIGEN_STRONG_INLINE void pstoreu< int > (int *to, const Packet8i &from)
 
template<>
EIGEN_DEVICE_FUNC Packet8f pgather< float, Packet8f > (const float *from, Index stride)
 
template<>
EIGEN_DEVICE_FUNC Packet4d pgather< double, Packet4d > (const double *from, Index stride)
 
template<>
EIGEN_DEVICE_FUNC void pscatter< float, Packet8f > (float *to, const Packet8f &from, Index stride)
 
template<>
EIGEN_DEVICE_FUNC void pscatter< double, Packet4d > (double *to, const Packet4d &from, Index stride)
 
template<>
EIGEN_STRONG_INLINE void pstore1< Packet8f > (float *to, const float &a)
 
template<>
EIGEN_STRONG_INLINE void pstore1< Packet4d > (double *to, const double &a)
 
template<>
EIGEN_STRONG_INLINE void pstore1< Packet8i > (int *to, const int &a)
 
template<>
EIGEN_STRONG_INLINE void prefetch< float > (const float *addr)
 
template<>
EIGEN_STRONG_INLINE void prefetch< double > (const double *addr)
 
template<>
EIGEN_STRONG_INLINE void prefetch< int > (const int *addr)
 
template<>
EIGEN_STRONG_INLINE float pfirst< Packet8f > (const Packet8f &a)
 
template<>
EIGEN_STRONG_INLINE double pfirst< Packet4d > (const Packet4d &a)
 
template<>
EIGEN_STRONG_INLINE int pfirst< Packet8i > (const Packet8i &a)
 
template<>
EIGEN_STRONG_INLINE Packet8f preverse (const Packet8f &a)
 
template<>
EIGEN_STRONG_INLINE Packet4d preverse (const Packet4d &a)
 
template<>
EIGEN_STRONG_INLINE Packet8f pabs (const Packet8f &a)
 
template<>
EIGEN_STRONG_INLINE Packet4d pabs (const Packet4d &a)
 
template<>
EIGEN_STRONG_INLINE Packet8f preduxp< Packet8f > (const Packet8f *vecs)
 
template<>
EIGEN_STRONG_INLINE Packet4d preduxp< Packet4d > (const Packet4d *vecs)
 
template<>
EIGEN_STRONG_INLINE float predux< Packet8f > (const Packet8f &a)
 
template<>
EIGEN_STRONG_INLINE double predux< Packet4d > (const Packet4d &a)
 
template<>
EIGEN_STRONG_INLINE Packet4f predux_downto4< Packet8f > (const Packet8f &a)
 
template<>
EIGEN_STRONG_INLINE float predux_mul< Packet8f > (const Packet8f &a)
 
template<>
EIGEN_STRONG_INLINE double predux_mul< Packet4d > (const Packet4d &a)
 
template<>
EIGEN_STRONG_INLINE float predux_min< Packet8f > (const Packet8f &a)
 
template<>
EIGEN_STRONG_INLINE double predux_min< Packet4d > (const Packet4d &a)
 
template<>
EIGEN_STRONG_INLINE float predux_max< Packet8f > (const Packet8f &a)
 
template<>
EIGEN_STRONG_INLINE double predux_max< Packet4d > (const Packet4d &a)
 
EIGEN_DEVICE_FUNC void ptranspose (PacketBlock< Packet8f, 8 > &kernel)
 
EIGEN_DEVICE_FUNC void ptranspose (PacketBlock< Packet8f, 4 > &kernel)
 
EIGEN_DEVICE_FUNC void ptranspose (PacketBlock< Packet4d, 4 > &kernel)
 
template<>
EIGEN_STRONG_INLINE Packet8f pblend (const Selector< 8 > &ifPacket, const Packet8f &thenPacket, const Packet8f &elsePacket)
 
template<>
EIGEN_STRONG_INLINE Packet4d pblend (const Selector< 4 > &ifPacket, const Packet4d &thenPacket, const Packet4d &elsePacket)
 
template<>
EIGEN_STRONG_INLINE Packet8f pinsertfirst (const Packet8f &a, float b)
 
template<>
EIGEN_STRONG_INLINE Packet4d pinsertfirst (const Packet4d &a, double b)
 
template<>
EIGEN_STRONG_INLINE Packet8f pinsertlast (const Packet8f &a, float b)
 
template<>
EIGEN_STRONG_INLINE Packet4d pinsertlast (const Packet4d &a, double b)
 
template<>
EIGEN_STRONG_INLINE Packet8i pcast< Packet8f, Packet8i > (const Packet8f &a)
 
template<>
EIGEN_STRONG_INLINE Packet8f pcast< Packet8i, Packet8f > (const Packet8i &a)
 
template<>
EIGEN_STRONG_INLINE Packet16f pset1< Packet16f > (const float &from)
 
template<>
EIGEN_STRONG_INLINE Packet8d pset1< Packet8d > (const double &from)
 
template<>
EIGEN_STRONG_INLINE Packet16i pset1< Packet16i > (const int &from)
 
template<>
EIGEN_STRONG_INLINE Packet16f pload1< Packet16f > (const float *from)
 
template<>
EIGEN_STRONG_INLINE Packet8d pload1< Packet8d > (const double *from)
 
template<>
EIGEN_STRONG_INLINE Packet16f plset< Packet16f > (const float &a)
 
template<>
EIGEN_STRONG_INLINE Packet8d plset< Packet8d > (const double &a)
 
template<>
EIGEN_STRONG_INLINE Packet16f padd< Packet16f > (const Packet16f &a, const Packet16f &b)
 
template<>
EIGEN_STRONG_INLINE Packet8d padd< Packet8d > (const Packet8d &a, const Packet8d &b)
 
template<>
EIGEN_STRONG_INLINE Packet16f psub< Packet16f > (const Packet16f &a, const Packet16f &b)
 
template<>
EIGEN_STRONG_INLINE Packet8d psub< Packet8d > (const Packet8d &a, const Packet8d &b)
 
template<>
EIGEN_STRONG_INLINE Packet16f pnegate (const Packet16f &a)
 
template<>
EIGEN_STRONG_INLINE Packet8d pnegate (const Packet8d &a)
 
template<>
EIGEN_STRONG_INLINE Packet16f pconj (const Packet16f &a)
 
template<>
EIGEN_STRONG_INLINE Packet8d pconj (const Packet8d &a)
 
template<>
EIGEN_STRONG_INLINE Packet16i pconj (const Packet16i &a)
 
template<>
EIGEN_STRONG_INLINE Packet16f pmul< Packet16f > (const Packet16f &a, const Packet16f &b)
 
template<>
EIGEN_STRONG_INLINE Packet8d pmul< Packet8d > (const Packet8d &a, const Packet8d &b)
 
template<>
EIGEN_STRONG_INLINE Packet16f pdiv< Packet16f > (const Packet16f &a, const Packet16f &b)
 
template<>
EIGEN_STRONG_INLINE Packet8d pdiv< Packet8d > (const Packet8d &a, const Packet8d &b)
 
template<>
EIGEN_STRONG_INLINE Packet16f pmin< Packet16f > (const Packet16f &a, const Packet16f &b)
 
template<>
EIGEN_STRONG_INLINE Packet8d pmin< Packet8d > (const Packet8d &a, const Packet8d &b)
 
template<>
EIGEN_STRONG_INLINE Packet16f pmax< Packet16f > (const Packet16f &a, const Packet16f &b)
 
template<>
EIGEN_STRONG_INLINE Packet8d pmax< Packet8d > (const Packet8d &a, const Packet8d &b)
 
template<>
EIGEN_STRONG_INLINE Packet16f pand< Packet16f > (const Packet16f &a, const Packet16f &b)
 
template<>
EIGEN_STRONG_INLINE Packet8d pand< Packet8d > (const Packet8d &a, const Packet8d &b)
 
template<>
EIGEN_STRONG_INLINE Packet16f por< Packet16f > (const Packet16f &a, const Packet16f &b)
 
template<>
EIGEN_STRONG_INLINE Packet8d por< Packet8d > (const Packet8d &a, const Packet8d &b)
 
template<>
EIGEN_STRONG_INLINE Packet16f pxor< Packet16f > (const Packet16f &a, const Packet16f &b)
 
template<>
EIGEN_STRONG_INLINE Packet8d pxor< Packet8d > (const Packet8d &a, const Packet8d &b)
 
template<>
EIGEN_STRONG_INLINE Packet16f pandnot< Packet16f > (const Packet16f &a, const Packet16f &b)
 
template<>
EIGEN_STRONG_INLINE Packet8d pandnot< Packet8d > (const Packet8d &a, const Packet8d &b)
 
template<>
EIGEN_STRONG_INLINE Packet16f pload< Packet16f > (const float *from)
 
template<>
EIGEN_STRONG_INLINE Packet8d pload< Packet8d > (const double *from)
 
template<>
EIGEN_STRONG_INLINE Packet16i pload< Packet16i > (const int *from)
 
template<>
EIGEN_STRONG_INLINE Packet16f ploadu< Packet16f > (const float *from)
 
template<>
EIGEN_STRONG_INLINE Packet8d ploadu< Packet8d > (const double *from)
 
template<>
EIGEN_STRONG_INLINE Packet16i ploadu< Packet16i > (const int *from)
 
template<>
EIGEN_STRONG_INLINE Packet16f ploaddup< Packet16f > (const float *from)
 
template<>
EIGEN_STRONG_INLINE Packet8d ploaddup< Packet8d > (const double *from)
 
template<>
EIGEN_STRONG_INLINE Packet16f ploadquad< Packet16f > (const float *from)
 
template<>
EIGEN_STRONG_INLINE Packet8d ploadquad< Packet8d > (const double *from)
 
template<>
EIGEN_STRONG_INLINE void pstore< float > (float *to, const Packet16f &from)
 
template<>
EIGEN_STRONG_INLINE void pstore< double > (double *to, const Packet8d &from)
 
template<>
EIGEN_STRONG_INLINE void pstore< int > (int *to, const Packet16i &from)
 
template<>
EIGEN_STRONG_INLINE void pstoreu< float > (float *to, const Packet16f &from)
 
template<>
EIGEN_STRONG_INLINE void pstoreu< double > (double *to, const Packet8d &from)
 
template<>
EIGEN_STRONG_INLINE void pstoreu< int > (int *to, const Packet16i &from)
 
template<>
EIGEN_DEVICE_FUNC Packet16f pgather< float, Packet16f > (const float *from, Index stride)
 
template<>
EIGEN_DEVICE_FUNC Packet8d pgather< double, Packet8d > (const double *from, Index stride)
 
template<>
EIGEN_DEVICE_FUNC void pscatter< float, Packet16f > (float *to, const Packet16f &from, Index stride)
 
template<>
EIGEN_DEVICE_FUNC void pscatter< double, Packet8d > (double *to, const Packet8d &from, Index stride)
 
template<>
EIGEN_STRONG_INLINE void pstore1< Packet16f > (float *to, const float &a)
 
template<>
EIGEN_STRONG_INLINE void pstore1< Packet8d > (double *to, const double &a)
 
template<>
EIGEN_STRONG_INLINE void pstore1< Packet16i > (int *to, const int &a)
 
template<>
EIGEN_STRONG_INLINE void prefetch< float > (const float *addr)
 
template<>
EIGEN_STRONG_INLINE void prefetch< double > (const double *addr)
 
template<>
EIGEN_STRONG_INLINE void prefetch< int > (const int *addr)
 
template<>
EIGEN_STRONG_INLINE float pfirst< Packet16f > (const Packet16f &a)
 
template<>
EIGEN_STRONG_INLINE double pfirst< Packet8d > (const Packet8d &a)
 
template<>
EIGEN_STRONG_INLINE int pfirst< Packet16i > (const Packet16i &a)
 
template<>
EIGEN_STRONG_INLINE Packet16f preverse (const Packet16f &a)
 
template<>
EIGEN_STRONG_INLINE Packet8d preverse (const Packet8d &a)
 
template<>
EIGEN_STRONG_INLINE Packet16f pabs (const Packet16f &a)
 
template<>
EIGEN_STRONG_INLINE Packet8d pabs (const Packet8d &a)
 
template<>
EIGEN_STRONG_INLINE Packet16f preduxp< Packet16f > (const Packet16f *vecs)
 
template<>
EIGEN_STRONG_INLINE Packet8d preduxp< Packet8d > (const Packet8d *vecs)
 
template<>
EIGEN_STRONG_INLINE float predux< Packet16f > (const Packet16f &a)
 
template<>
EIGEN_STRONG_INLINE double predux< Packet8d > (const Packet8d &a)
 
template<>
EIGEN_STRONG_INLINE Packet8f predux_downto4< Packet16f > (const Packet16f &a)
 
template<>
EIGEN_STRONG_INLINE Packet4d predux_downto4< Packet8d > (const Packet8d &a)
 
template<>
EIGEN_STRONG_INLINE float predux_mul< Packet16f > (const Packet16f &a)
 
template<>
EIGEN_STRONG_INLINE double predux_mul< Packet8d > (const Packet8d &a)
 
template<>
EIGEN_STRONG_INLINE float predux_min< Packet16f > (const Packet16f &a)
 
template<>
EIGEN_STRONG_INLINE double predux_min< Packet8d > (const Packet8d &a)
 
template<>
EIGEN_STRONG_INLINE float predux_max< Packet16f > (const Packet16f &a)
 
template<>
EIGEN_STRONG_INLINE double predux_max< Packet8d > (const Packet8d &a)
 
EIGEN_DEVICE_FUNC void ptranspose (PacketBlock< Packet16f, 16 > &kernel)
 
EIGEN_DEVICE_FUNC void ptranspose (PacketBlock< Packet16f, 4 > &kernel)
 
EIGEN_DEVICE_FUNC void ptranspose (PacketBlock< Packet8d, 4 > &kernel)
 
EIGEN_DEVICE_FUNC void ptranspose (PacketBlock< Packet8d, 8 > &kernel)
 
template<>
EIGEN_STRONG_INLINE Packet16f pblend (const Selector< 16 > &, const Packet16f &, const Packet16f &)
 
template<>
EIGEN_STRONG_INLINE Packet8d pblend (const Selector< 8 > &, const Packet8d &, const Packet8d &)
 
uint32x2_t p2ui_CONJ_XOR ()
 
template<>
EIGEN_STRONG_INLINE Packet2cf pset1< Packet2cf > (const std::complex< float > &from)
 
template<>
EIGEN_STRONG_INLINE Packet2cf padd< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cf psub< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pnegate (const Packet2cf &a)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pconj (const Packet2cf &a)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pmul< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pand< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cf por< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pxor< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pandnot< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pload< Packet2cf > (const std::complex< float > *from)
 
template<>
EIGEN_STRONG_INLINE Packet2cf ploadu< Packet2cf > (const std::complex< float > *from)
 
template<>
EIGEN_STRONG_INLINE Packet2cf ploaddup< Packet2cf > (const std::complex< float > *from)
 
template<>
EIGEN_STRONG_INLINE void pstore< std::complex< float > > (std::complex< float > *to, const Packet2cf &from)
 
template<>
EIGEN_STRONG_INLINE void pstoreu< std::complex< float > > (std::complex< float > *to, const Packet2cf &from)
 
template<>
EIGEN_DEVICE_FUNC Packet2cf pgather< std::complex< float >, Packet2cf > (const std::complex< float > *from, Index stride)
 
template<>
EIGEN_DEVICE_FUNC void pscatter< std::complex< float >, Packet2cf > (std::complex< float > *to, const Packet2cf &from, Index stride)
 
template<>
EIGEN_STRONG_INLINE void prefetch< std::complex< float > > (const std::complex< float > *addr)
 
template<>
EIGEN_STRONG_INLINE std::complex< float > pfirst< Packet2cf > (const Packet2cf &a)
 
template<>
EIGEN_STRONG_INLINE Packet2cf preverse (const Packet2cf &a)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pcplxflip< Packet2cf > (const Packet2cf &a)
 
template<>
EIGEN_STRONG_INLINE std::complex< float > predux< Packet2cf > (const Packet2cf &a)
 
template<>
EIGEN_STRONG_INLINE Packet2cf preduxp< Packet2cf > (const Packet2cf *vecs)
 
template<>
EIGEN_STRONG_INLINE std::complex< float > predux_mul< Packet2cf > (const Packet2cf &a)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pdiv< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
 
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f pexp< Packet4f > (const Packet4f &_x)
 
template<>
EIGEN_STRONG_INLINE Packet4f pset1< Packet4f > (const float &from)
 
template<>
EIGEN_STRONG_INLINE Packet4i pset1< Packet4i > (const int32_t &from)
 
template<>
EIGEN_STRONG_INLINE Packet4f plset< Packet4f > (const float &a)
 
template<>
EIGEN_STRONG_INLINE Packet4i plset< Packet4i > (const int32_t &a)
 
template<>
EIGEN_STRONG_INLINE Packet4f padd< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i padd< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f psub< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i psub< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f pnegate (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE Packet4i pnegate (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE Packet4f pconj (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE Packet4i pconj (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE Packet4f pmul< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i pmul< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f pdiv< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i pdiv< Packet4i > (const Packet4i &, const Packet4i &)
 
template<>
EIGEN_STRONG_INLINE Packet4f pmadd (const Packet4f &a, const Packet4f &b, const Packet4f &c)
 
template<>
EIGEN_STRONG_INLINE Packet4i pmadd (const Packet4i &a, const Packet4i &b, const Packet4i &c)
 
template<>
EIGEN_STRONG_INLINE Packet4f pmin< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i pmin< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f pmax< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i pmax< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f pand< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i pand< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f por< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i por< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f pxor< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i pxor< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f pandnot< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i pandnot< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f pload< Packet4f > (const float *from)
 
template<>
EIGEN_STRONG_INLINE Packet4i pload< Packet4i > (const int32_t *from)
 
template<>
EIGEN_STRONG_INLINE Packet4f ploadu< Packet4f > (const float *from)
 
template<>
EIGEN_STRONG_INLINE Packet4i ploadu< Packet4i > (const int32_t *from)
 
template<>
EIGEN_STRONG_INLINE Packet4f ploaddup< Packet4f > (const float *from)
 
template<>
EIGEN_STRONG_INLINE Packet4i ploaddup< Packet4i > (const int32_t *from)
 
template<>
EIGEN_STRONG_INLINE void pstore< float > (float *to, const Packet4f &from)
 
template<>
EIGEN_STRONG_INLINE void pstore< int32_t > (int32_t *to, const Packet4i &from)
 
template<>
EIGEN_STRONG_INLINE void pstoreu< float > (float *to, const Packet4f &from)
 
template<>
EIGEN_STRONG_INLINE void pstoreu< int32_t > (int32_t *to, const Packet4i &from)
 
template<>
EIGEN_DEVICE_FUNC Packet4f pgather< float, Packet4f > (const float *from, Index stride)
 
template<>
EIGEN_DEVICE_FUNC Packet4i pgather< int32_t, Packet4i > (const int32_t *from, Index stride)
 
template<>
EIGEN_DEVICE_FUNC void pscatter< float, Packet4f > (float *to, const Packet4f &from, Index stride)
 
template<>
EIGEN_DEVICE_FUNC void pscatter< int32_t, Packet4i > (int32_t *to, const Packet4i &from, Index stride)
 
template<>
EIGEN_STRONG_INLINE void prefetch< float > (const float *addr)
 
template<>
EIGEN_STRONG_INLINE void prefetch< int32_t > (const int32_t *addr)
 
template<>
EIGEN_STRONG_INLINE float pfirst< Packet4f > (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE int32_t pfirst< Packet4i > (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE Packet4f preverse (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE Packet4i preverse (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE Packet4f pabs (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE Packet4i pabs (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE float predux< Packet4f > (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE Packet4f preduxp< Packet4f > (const Packet4f *vecs)
 
template<>
EIGEN_STRONG_INLINE int32_t predux< Packet4i > (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE Packet4i preduxp< Packet4i > (const Packet4i *vecs)
 
template<>
EIGEN_STRONG_INLINE float predux_mul< Packet4f > (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE int32_t predux_mul< Packet4i > (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE float predux_min< Packet4f > (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE int32_t predux_min< Packet4i > (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE float predux_max< Packet4f > (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE int32_t predux_max< Packet4i > (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE Packet2cf padd< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cf psub< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pnegate (const Packet2cf &a)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pconj (const Packet2cf &a)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pmul< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pand< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cf por< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pxor< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pandnot< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pload< Packet2cf > (const std::complex< float > *from)
 
template<>
EIGEN_STRONG_INLINE Packet2cf ploadu< Packet2cf > (const std::complex< float > *from)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pset1< Packet2cf > (const std::complex< float > &from)
 
template<>
EIGEN_STRONG_INLINE Packet2cf ploaddup< Packet2cf > (const std::complex< float > *from)
 
template<>
EIGEN_STRONG_INLINE void pstore< std::complex< float > > (std::complex< float > *to, const Packet2cf &from)
 
template<>
EIGEN_STRONG_INLINE void pstoreu< std::complex< float > > (std::complex< float > *to, const Packet2cf &from)
 
template<>
EIGEN_DEVICE_FUNC Packet2cf pgather< std::complex< float >, Packet2cf > (const std::complex< float > *from, Index stride)
 
template<>
EIGEN_DEVICE_FUNC void pscatter< std::complex< float >, Packet2cf > (std::complex< float > *to, const Packet2cf &from, Index stride)
 
template<>
EIGEN_STRONG_INLINE void prefetch< std::complex< float > > (const std::complex< float > *addr)
 
template<>
EIGEN_STRONG_INLINE std::complex< float > pfirst< Packet2cf > (const Packet2cf &a)
 
template<>
EIGEN_STRONG_INLINE Packet2cf preverse (const Packet2cf &a)
 
template<>
EIGEN_STRONG_INLINE std::complex< float > predux< Packet2cf > (const Packet2cf &a)
 
template<>
EIGEN_STRONG_INLINE Packet2cf preduxp< Packet2cf > (const Packet2cf *vecs)
 
template<>
EIGEN_STRONG_INLINE std::complex< float > predux_mul< Packet2cf > (const Packet2cf &a)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pdiv< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
 
EIGEN_STRONG_INLINE Packet2cf pcplxflip (const Packet2cf &x)
 
template<>
EIGEN_STRONG_INLINE Packet1cd padd< Packet1cd > (const Packet1cd &a, const Packet1cd &b)
 
template<>
EIGEN_STRONG_INLINE Packet1cd psub< Packet1cd > (const Packet1cd &a, const Packet1cd &b)
 
template<>
EIGEN_STRONG_INLINE Packet1cd pnegate (const Packet1cd &a)
 
template<>
EIGEN_STRONG_INLINE Packet1cd pconj (const Packet1cd &a)
 
template<>
EIGEN_STRONG_INLINE Packet1cd pmul< Packet1cd > (const Packet1cd &a, const Packet1cd &b)
 
template<>
EIGEN_STRONG_INLINE Packet1cd pand< Packet1cd > (const Packet1cd &a, const Packet1cd &b)
 
template<>
EIGEN_STRONG_INLINE Packet1cd por< Packet1cd > (const Packet1cd &a, const Packet1cd &b)
 
template<>
EIGEN_STRONG_INLINE Packet1cd pxor< Packet1cd > (const Packet1cd &a, const Packet1cd &b)
 
template<>
EIGEN_STRONG_INLINE Packet1cd pandnot< Packet1cd > (const Packet1cd &a, const Packet1cd &b)
 
template<>
EIGEN_STRONG_INLINE Packet1cd pload< Packet1cd > (const std::complex< double > *from)
 
template<>
EIGEN_STRONG_INLINE Packet1cd ploadu< Packet1cd > (const std::complex< double > *from)
 
template<>
EIGEN_STRONG_INLINE Packet1cd pset1< Packet1cd > (const std::complex< double > &from)
 
template<>
EIGEN_STRONG_INLINE Packet1cd ploaddup< Packet1cd > (const std::complex< double > *from)
 
template<>
EIGEN_STRONG_INLINE void pstore< std::complex< double > > (std::complex< double > *to, const Packet1cd &from)
 
template<>
EIGEN_STRONG_INLINE void pstoreu< std::complex< double > > (std::complex< double > *to, const Packet1cd &from)
 
template<>
EIGEN_STRONG_INLINE void prefetch< std::complex< double > > (const std::complex< double > *addr)
 
template<>
EIGEN_STRONG_INLINE std::complex< double > pfirst< Packet1cd > (const Packet1cd &a)
 
template<>
EIGEN_STRONG_INLINE Packet1cd preverse (const Packet1cd &a)
 
template<>
EIGEN_STRONG_INLINE std::complex< double > predux< Packet1cd > (const Packet1cd &a)
 
template<>
EIGEN_STRONG_INLINE Packet1cd preduxp< Packet1cd > (const Packet1cd *vecs)
 
template<>
EIGEN_STRONG_INLINE std::complex< double > predux_mul< Packet1cd > (const Packet1cd &a)
 
template<>
EIGEN_STRONG_INLINE Packet1cd pdiv< Packet1cd > (const Packet1cd &a, const Packet1cd &b)
 
EIGEN_STRONG_INLINE Packet1cd pcplxflip (const Packet1cd &x)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pblend (const Selector< 2 > &ifPacket, const Packet2cf &thenPacket, const Packet2cf &elsePacket)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pinsertfirst (const Packet2cf &a, std::complex< float > b)
 
template<>
EIGEN_STRONG_INLINE Packet1cd pinsertfirst (const Packet1cd &, std::complex< double > b)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pinsertlast (const Packet2cf &a, std::complex< float > b)
 
template<>
EIGEN_STRONG_INLINE Packet1cd pinsertlast (const Packet1cd &, std::complex< double > b)
 
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f plog< Packet4f > (const Packet4f &_x)
 
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f pexp< Packet4f > (const Packet4f &_x)
 
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2d pexp< Packet2d > (const Packet2d &_x)
 
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f psin< Packet4f > (const Packet4f &_x)
 
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f pcos< Packet4f > (const Packet4f &_x)
 
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f psqrt< Packet4f > (const Packet4f &x)
 
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2d psqrt< Packet2d > (const Packet2d &x)
 
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f prsqrt< Packet4f > (const Packet4f &x)
 
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2d prsqrt< Packet2d > (const Packet2d &x)
 
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f ptanh< Packet4f > (const Packet4f &x)
 
template<>
EIGEN_STRONG_INLINE Packet4f pset1< Packet4f > (const float &from)
 
template<>
EIGEN_STRONG_INLINE Packet2d pset1< Packet2d > (const double &from)
 
template<>
EIGEN_STRONG_INLINE Packet4i pset1< Packet4i > (const int &from)
 
template<>
EIGEN_STRONG_INLINE Packet4f plset< Packet4f > (const float &a)
 
template<>
EIGEN_STRONG_INLINE Packet2d plset< Packet2d > (const double &a)
 
template<>
EIGEN_STRONG_INLINE Packet4i plset< Packet4i > (const int &a)
 
template<>
EIGEN_STRONG_INLINE Packet4f padd< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet2d padd< Packet2d > (const Packet2d &a, const Packet2d &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i padd< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f psub< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet2d psub< Packet2d > (const Packet2d &a, const Packet2d &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i psub< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f pnegate (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE Packet2d pnegate (const Packet2d &a)
 
template<>
EIGEN_STRONG_INLINE Packet4i pnegate (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE Packet4f pconj (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE Packet2d pconj (const Packet2d &a)
 
template<>
EIGEN_STRONG_INLINE Packet4i pconj (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE Packet4f pmul< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet2d pmul< Packet2d > (const Packet2d &a, const Packet2d &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i pmul< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f pdiv< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet2d pdiv< Packet2d > (const Packet2d &a, const Packet2d &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i pmadd (const Packet4i &a, const Packet4i &b, const Packet4i &c)
 
template<>
EIGEN_STRONG_INLINE Packet4f pmin< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet2d pmin< Packet2d > (const Packet2d &a, const Packet2d &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i pmin< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f pmax< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet2d pmax< Packet2d > (const Packet2d &a, const Packet2d &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i pmax< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f pand< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet2d pand< Packet2d > (const Packet2d &a, const Packet2d &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i pand< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f por< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet2d por< Packet2d > (const Packet2d &a, const Packet2d &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i por< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f pxor< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet2d pxor< Packet2d > (const Packet2d &a, const Packet2d &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i pxor< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f pandnot< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet2d pandnot< Packet2d > (const Packet2d &a, const Packet2d &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i pandnot< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f pload< Packet4f > (const float *from)
 
template<>
EIGEN_STRONG_INLINE Packet2d pload< Packet2d > (const double *from)
 
template<>
EIGEN_STRONG_INLINE Packet4i pload< Packet4i > (const int *from)
 
template<>
EIGEN_STRONG_INLINE Packet4f ploadu< Packet4f > (const float *from)
 
template<>
EIGEN_STRONG_INLINE Packet2d ploadu< Packet2d > (const double *from)
 
template<>
EIGEN_STRONG_INLINE Packet4i ploadu< Packet4i > (const int *from)
 
template<>
EIGEN_STRONG_INLINE Packet4f ploaddup< Packet4f > (const float *from)
 
template<>
EIGEN_STRONG_INLINE Packet2d ploaddup< Packet2d > (const double *from)
 
template<>
EIGEN_STRONG_INLINE Packet4i ploaddup< Packet4i > (const int *from)
 
template<>
EIGEN_STRONG_INLINE void pstore< float > (float *to, const Packet4f &from)
 
template<>
EIGEN_STRONG_INLINE void pstore< double > (double *to, const Packet2d &from)
 
template<>
EIGEN_STRONG_INLINE void pstore< int > (int *to, const Packet4i &from)
 
template<>
EIGEN_STRONG_INLINE void pstoreu< double > (double *to, const Packet2d &from)
 
template<>
EIGEN_STRONG_INLINE void pstoreu< float > (float *to, const Packet4f &from)
 
template<>
EIGEN_STRONG_INLINE void pstoreu< int > (int *to, const Packet4i &from)
 
template<>
EIGEN_DEVICE_FUNC Packet4f pgather< float, Packet4f > (const float *from, Index stride)
 
template<>
EIGEN_DEVICE_FUNC Packet2d pgather< double, Packet2d > (const double *from, Index stride)
 
template<>
EIGEN_DEVICE_FUNC Packet4i pgather< int, Packet4i > (const int *from, Index stride)
 
template<>
EIGEN_DEVICE_FUNC void pscatter< float, Packet4f > (float *to, const Packet4f &from, Index stride)
 
template<>
EIGEN_DEVICE_FUNC void pscatter< double, Packet2d > (double *to, const Packet2d &from, Index stride)
 
template<>
EIGEN_DEVICE_FUNC void pscatter< int, Packet4i > (int *to, const Packet4i &from, Index stride)
 
template<>
EIGEN_STRONG_INLINE void pstore1< Packet4f > (float *to, const float &a)
 
template<>
EIGEN_STRONG_INLINE void pstore1< Packet2d > (double *to, const double &a)
 
template<>
EIGEN_STRONG_INLINE void prefetch< float > (const float *addr)
 
template<>
EIGEN_STRONG_INLINE void prefetch< double > (const double *addr)
 
template<>
EIGEN_STRONG_INLINE void prefetch< int > (const int *addr)
 
template<>
EIGEN_STRONG_INLINE float pfirst< Packet4f > (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE double pfirst< Packet2d > (const Packet2d &a)
 
template<>
EIGEN_STRONG_INLINE int pfirst< Packet4i > (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE Packet4f preverse (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE Packet2d preverse (const Packet2d &a)
 
template<>
EIGEN_STRONG_INLINE Packet4i preverse (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE Packet4f pabs (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE Packet2d pabs (const Packet2d &a)
 
template<>
EIGEN_STRONG_INLINE Packet4i pabs (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE void pbroadcast4< Packet4f > (const float *a, Packet4f &a0, Packet4f &a1, Packet4f &a2, Packet4f &a3)
 
template<>
EIGEN_STRONG_INLINE void pbroadcast4< Packet2d > (const double *a, Packet2d &a0, Packet2d &a1, Packet2d &a2, Packet2d &a3)
 
EIGEN_STRONG_INLINE void punpackp (Packet4f *vecs)
 
template<>
EIGEN_STRONG_INLINE Packet4f preduxp< Packet4f > (const Packet4f *vecs)
 
template<>
EIGEN_STRONG_INLINE Packet2d preduxp< Packet2d > (const Packet2d *vecs)
 
template<>
EIGEN_STRONG_INLINE float predux< Packet4f > (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE double predux< Packet2d > (const Packet2d &a)
 
template<>
EIGEN_STRONG_INLINE int predux< Packet4i > (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE Packet4i preduxp< Packet4i > (const Packet4i *vecs)
 
template<>
EIGEN_STRONG_INLINE float predux_mul< Packet4f > (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE double predux_mul< Packet2d > (const Packet2d &a)
 
template<>
EIGEN_STRONG_INLINE int predux_mul< Packet4i > (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE float predux_min< Packet4f > (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE double predux_min< Packet2d > (const Packet2d &a)
 
template<>
EIGEN_STRONG_INLINE int predux_min< Packet4i > (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE float predux_max< Packet4f > (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE double predux_max< Packet2d > (const Packet2d &a)
 
template<>
EIGEN_STRONG_INLINE int predux_max< Packet4i > (const Packet4i &a)
 
EIGEN_DEVICE_FUNC void ptranspose (PacketBlock< Packet2d, 2 > &kernel)
 
template<>
EIGEN_STRONG_INLINE Packet4i pblend (const Selector< 4 > &ifPacket, const Packet4i &thenPacket, const Packet4i &elsePacket)
 
template<>
EIGEN_STRONG_INLINE Packet4f pblend (const Selector< 4 > &ifPacket, const Packet4f &thenPacket, const Packet4f &elsePacket)
 
template<>
EIGEN_STRONG_INLINE Packet2d pblend (const Selector< 2 > &ifPacket, const Packet2d &thenPacket, const Packet2d &elsePacket)
 
template<>
EIGEN_STRONG_INLINE Packet4f pinsertfirst (const Packet4f &a, float b)
 
template<>
EIGEN_STRONG_INLINE Packet2d pinsertfirst (const Packet2d &a, double b)
 
template<>
EIGEN_STRONG_INLINE Packet4f pinsertlast (const Packet4f &a, float b)
 
template<>
EIGEN_STRONG_INLINE Packet2d pinsertlast (const Packet2d &a, double b)
 
template<>
EIGEN_STRONG_INLINE Packet4i pcast< Packet4f, Packet4i > (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE Packet4f pcast< Packet4i, Packet4f > (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE Packet4f pcast< Packet2d, Packet4f > (const Packet2d &a, const Packet2d &b)
 
template<>
EIGEN_STRONG_INLINE Packet2d pcast< Packet4f, Packet2d > (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pload< Packet2cf > (const std::complex< float > *from)
 
template<>
EIGEN_STRONG_INLINE Packet1cd pload< Packet1cd > (const std::complex< double > *from)
 
template<>
EIGEN_STRONG_INLINE Packet2cf ploadu< Packet2cf > (const std::complex< float > *from)
 
template<>
EIGEN_STRONG_INLINE Packet1cd ploadu< Packet1cd > (const std::complex< double > *from)
 
template<>
EIGEN_STRONG_INLINE void pstore< std::complex< float > > (std::complex< float > *to, const Packet2cf &from)
 
template<>
EIGEN_STRONG_INLINE void pstore< std::complex< double > > (std::complex< double > *to, const Packet1cd &from)
 
template<>
EIGEN_STRONG_INLINE void pstoreu< std::complex< float > > (std::complex< float > *to, const Packet2cf &from)
 
template<>
EIGEN_STRONG_INLINE void pstoreu< std::complex< double > > (std::complex< double > *to, const Packet1cd &from)
 
template<>
EIGEN_STRONG_INLINE Packet1cd pset1< Packet1cd > (const std::complex< double > &from)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pset1< Packet2cf > (const std::complex< float > &from)
 
template<>
EIGEN_DEVICE_FUNC Packet2cf pgather< std::complex< float >, Packet2cf > (const std::complex< float > *from, Index stride)
 
template<>
EIGEN_DEVICE_FUNC Packet1cd pgather< std::complex< double >, Packet1cd > (const std::complex< double > *from, Index stride EIGEN_UNUSED)
 
template<>
EIGEN_DEVICE_FUNC void pscatter< std::complex< float >, Packet2cf > (std::complex< float > *to, const Packet2cf &from, Index stride)
 
template<>
EIGEN_DEVICE_FUNC void pscatter< std::complex< double >, Packet1cd > (std::complex< double > *to, const Packet1cd &from, Index stride EIGEN_UNUSED)
 
template<>
EIGEN_STRONG_INLINE Packet2cf padd< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet1cd padd< Packet1cd > (const Packet1cd &a, const Packet1cd &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cf psub< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet1cd psub< Packet1cd > (const Packet1cd &a, const Packet1cd &b)
 
template<>
EIGEN_STRONG_INLINE Packet1cd pnegate (const Packet1cd &a)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pnegate (const Packet2cf &a)
 
template<>
EIGEN_STRONG_INLINE Packet1cd pconj (const Packet1cd &a)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pconj (const Packet2cf &a)
 
template<>
EIGEN_STRONG_INLINE Packet1cd pmul< Packet1cd > (const Packet1cd &a, const Packet1cd &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pmul< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet1cd pand< Packet1cd > (const Packet1cd &a, const Packet1cd &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pand< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet1cd por< Packet1cd > (const Packet1cd &a, const Packet1cd &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cf por< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet1cd pxor< Packet1cd > (const Packet1cd &a, const Packet1cd &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pxor< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet1cd pandnot< Packet1cd > (const Packet1cd &a, const Packet1cd &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pandnot< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
 
template<>
EIGEN_STRONG_INLINE Packet1cd ploaddup< Packet1cd > (const std::complex< double > *from)
 
template<>
EIGEN_STRONG_INLINE Packet2cf ploaddup< Packet2cf > (const std::complex< float > *from)
 
template<>
EIGEN_STRONG_INLINE void prefetch< std::complex< float > > (const std::complex< float > *addr)
 
template<>
EIGEN_STRONG_INLINE void prefetch< std::complex< double > > (const std::complex< double > *addr)
 
template<>
EIGEN_STRONG_INLINE std::complex< double > pfirst< Packet1cd > (const Packet1cd &a)
 
template<>
EIGEN_STRONG_INLINE std::complex< float > pfirst< Packet2cf > (const Packet2cf &a)
 
template<>
EIGEN_STRONG_INLINE Packet1cd preverse (const Packet1cd &a)
 
template<>
EIGEN_STRONG_INLINE Packet2cf preverse (const Packet2cf &a)
 
template<>
EIGEN_STRONG_INLINE std::complex< double > predux< Packet1cd > (const Packet1cd &a)
 
template<>
EIGEN_STRONG_INLINE std::complex< float > predux< Packet2cf > (const Packet2cf &a)
 
template<>
EIGEN_STRONG_INLINE Packet1cd preduxp< Packet1cd > (const Packet1cd *vecs)
 
template<>
EIGEN_STRONG_INLINE Packet2cf preduxp< Packet2cf > (const Packet2cf *vecs)
 
template<>
EIGEN_STRONG_INLINE std::complex< double > predux_mul< Packet1cd > (const Packet1cd &a)
 
template<>
EIGEN_STRONG_INLINE std::complex< float > predux_mul< Packet2cf > (const Packet2cf &a)
 
template<>
EIGEN_STRONG_INLINE Packet1cd pdiv< Packet1cd > (const Packet1cd &a, const Packet1cd &b)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pdiv< Packet2cf > (const Packet2cf &a, const Packet2cf &b)
 
EIGEN_STRONG_INLINE void ptranspose (PacketBlock< Packet1cd, 2 > &kernel)
 
template<>
EIGEN_STRONG_INLINE Packet2cf pblend (const Selector< 2 > &ifPacket, const Packet2cf &thenPacket, const Packet2cf &elsePacket)
 
static _EIGEN_DECLARE_CONST_Packet2d (1, 1.0)
 
static _EIGEN_DECLARE_CONST_Packet2d (2, 2.0)
 
static _EIGEN_DECLARE_CONST_Packet2d (half, 0.5)
 
static _EIGEN_DECLARE_CONST_Packet2d (exp_hi, 709.437)
 
static _EIGEN_DECLARE_CONST_Packet2d (exp_lo, -709.436139303)
 
static _EIGEN_DECLARE_CONST_Packet2d (cephes_LOG2EF, 1.4426950408889634073599)
 
static _EIGEN_DECLARE_CONST_Packet2d (cephes_exp_p0, 1.26177193074810590878e-4)
 
static _EIGEN_DECLARE_CONST_Packet2d (cephes_exp_p1, 3.02994407707441961300e-2)
 
static _EIGEN_DECLARE_CONST_Packet2d (cephes_exp_p2, 9.99999999999999999910e-1)
 
static _EIGEN_DECLARE_CONST_Packet2d (cephes_exp_q0, 3.00198505138664455042e-6)
 
static _EIGEN_DECLARE_CONST_Packet2d (cephes_exp_q1, 2.52448340349684104192e-3)
 
static _EIGEN_DECLARE_CONST_Packet2d (cephes_exp_q2, 2.27265548208155028766e-1)
 
static _EIGEN_DECLARE_CONST_Packet2d (cephes_exp_q3, 2.00000000000000000009e0)
 
static _EIGEN_DECLARE_CONST_Packet2d (cephes_exp_C1, 0.693145751953125)
 
static _EIGEN_DECLARE_CONST_Packet2d (cephes_exp_C2, 1.42860682030941723212e-6)
 
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2d pexp< Packet2d > (const Packet2d &_x)
 
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f pexp< Packet4f > (const Packet4f &x)
 
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2d psqrt< Packet2d > (const Packet2d &x)
 
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f psqrt< Packet4f > (const Packet4f &x)
 
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet2d prsqrt< Packet2d > (const Packet2d &x)
 
template<>
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f prsqrt< Packet4f > (const Packet4f &x)
 
static _EIGEN_DECLARE_CONST_FAST_Packet4i (ONE, 1)
 
static _EIGEN_DECLARE_CONST_FAST_Packet2d (ZERO, 0)
 
static _EIGEN_DECLARE_CONST_FAST_Packet2l (ZERO, 0)
 
static _EIGEN_DECLARE_CONST_FAST_Packet2l (ONE, 1)
 
std::ostream & operator<< (std::ostream &s, const Packet2l &v)
 
std::ostream & operator<< (std::ostream &s, const Packet2ul &v)
 
std::ostream & operator<< (std::ostream &s, const Packet2d &v)
 
template<int element>
EIGEN_STRONG_INLINE Packet4f vec_splat_packet4f (const Packet4f &from)
 
template<>
EIGEN_STRONG_INLINE Packet4i pload< Packet4i > (const int *from)
 
template<>
EIGEN_STRONG_INLINE Packet4f pload< Packet4f > (const float *from)
 
template<>
EIGEN_STRONG_INLINE Packet2d pload< Packet2d > (const double *from)
 
template<>
EIGEN_STRONG_INLINE void pstore< int > (int *to, const Packet4i &from)
 
template<>
EIGEN_STRONG_INLINE void pstore< float > (float *to, const Packet4f &from)
 
template<>
EIGEN_STRONG_INLINE void pstore< double > (double *to, const Packet2d &from)
 
template<>
EIGEN_STRONG_INLINE Packet4i pset1< Packet4i > (const int &from)
 
template<>
EIGEN_STRONG_INLINE Packet2d pset1< Packet2d > (const double &from)
 
template<>
EIGEN_STRONG_INLINE Packet4f pset1< Packet4f > (const float &from)
 
template<>
EIGEN_STRONG_INLINE void pbroadcast4< Packet4i > (const int *a, Packet4i &a0, Packet4i &a1, Packet4i &a2, Packet4i &a3)
 
template<>
EIGEN_STRONG_INLINE void pbroadcast4< Packet4f > (const float *a, Packet4f &a0, Packet4f &a1, Packet4f &a2, Packet4f &a3)
 
template<>
EIGEN_STRONG_INLINE void pbroadcast4< Packet2d > (const double *a, Packet2d &a0, Packet2d &a1, Packet2d &a2, Packet2d &a3)
 
template<>
EIGEN_DEVICE_FUNC Packet4i pgather< int, Packet4i > (const int *from, Index stride)
 
template<>
EIGEN_DEVICE_FUNC Packet4f pgather< float, Packet4f > (const float *from, Index stride)
 
template<>
EIGEN_DEVICE_FUNC Packet2d pgather< double, Packet2d > (const double *from, Index stride)
 
template<>
EIGEN_DEVICE_FUNC void pscatter< int, Packet4i > (int *to, const Packet4i &from, Index stride)
 
template<>
EIGEN_DEVICE_FUNC void pscatter< float, Packet4f > (float *to, const Packet4f &from, Index stride)
 
template<>
EIGEN_DEVICE_FUNC void pscatter< double, Packet2d > (double *to, const Packet2d &from, Index stride)
 
template<>
EIGEN_STRONG_INLINE Packet4i padd< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f padd< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet2d padd< Packet2d > (const Packet2d &a, const Packet2d &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i psub< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f psub< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet2d psub< Packet2d > (const Packet2d &a, const Packet2d &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i pmul< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f pmul< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet2d pmul< Packet2d > (const Packet2d &a, const Packet2d &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i pdiv< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f pdiv< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet2d pdiv< Packet2d > (const Packet2d &a, const Packet2d &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i pnegate (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE Packet4f pnegate (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE Packet2d pnegate (const Packet2d &a)
 
template<>
EIGEN_STRONG_INLINE Packet4i pconj (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE Packet4f pconj (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE Packet2d pconj (const Packet2d &a)
 
template<>
EIGEN_STRONG_INLINE Packet4i pmadd (const Packet4i &a, const Packet4i &b, const Packet4i &c)
 
template<>
EIGEN_STRONG_INLINE Packet4f pmadd (const Packet4f &a, const Packet4f &b, const Packet4f &c)
 
template<>
EIGEN_STRONG_INLINE Packet2d pmadd (const Packet2d &a, const Packet2d &b, const Packet2d &c)
 
template<>
EIGEN_STRONG_INLINE Packet4i plset< Packet4i > (const int &a)
 
template<>
EIGEN_STRONG_INLINE Packet4f plset< Packet4f > (const float &a)
 
template<>
EIGEN_STRONG_INLINE Packet2d plset< Packet2d > (const double &a)
 
template<>
EIGEN_STRONG_INLINE Packet4i pmin< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet2d pmin< Packet2d > (const Packet2d &a, const Packet2d &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f pmin< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i pmax< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet2d pmax< Packet2d > (const Packet2d &a, const Packet2d &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f pmax< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i pand< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet2d pand< Packet2d > (const Packet2d &a, const Packet2d &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f pand< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i por< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet2d por< Packet2d > (const Packet2d &a, const Packet2d &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f por< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i pxor< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet2d pxor< Packet2d > (const Packet2d &a, const Packet2d &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f pxor< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4i pandnot< Packet4i > (const Packet4i &a, const Packet4i &b)
 
template<>
EIGEN_STRONG_INLINE Packet2d pandnot< Packet2d > (const Packet2d &a, const Packet2d &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f pandnot< Packet4f > (const Packet4f &a, const Packet4f &b)
 
template<>
EIGEN_STRONG_INLINE Packet4f pround< Packet4f > (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE Packet2d pround< Packet2d > (const Packet2d &a)
 
template<>
EIGEN_STRONG_INLINE Packet4f pceil< Packet4f > (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE Packet2d pceil< Packet2d > (const Packet2d &a)
 
template<>
EIGEN_STRONG_INLINE Packet4f pfloor< Packet4f > (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE Packet2d pfloor< Packet2d > (const Packet2d &a)
 
template<>
EIGEN_STRONG_INLINE Packet4i ploadu< Packet4i > (const int *from)
 
template<>
EIGEN_STRONG_INLINE Packet4f ploadu< Packet4f > (const float *from)
 
template<>
EIGEN_STRONG_INLINE Packet2d ploadu< Packet2d > (const double *from)
 
template<>
EIGEN_STRONG_INLINE Packet4i ploaddup< Packet4i > (const int *from)
 
template<>
EIGEN_STRONG_INLINE Packet4f ploaddup< Packet4f > (const float *from)
 
template<>
EIGEN_STRONG_INLINE Packet2d ploaddup< Packet2d > (const double *from)
 
template<>
EIGEN_STRONG_INLINE void pstoreu< int > (int *to, const Packet4i &from)
 
template<>
EIGEN_STRONG_INLINE void pstoreu< float > (float *to, const Packet4f &from)
 
template<>
EIGEN_STRONG_INLINE void pstoreu< double > (double *to, const Packet2d &from)
 
template<>
EIGEN_STRONG_INLINE void prefetch< int > (const int *addr)
 
template<>
EIGEN_STRONG_INLINE void prefetch< float > (const float *addr)
 
template<>
EIGEN_STRONG_INLINE void prefetch< double > (const double *addr)
 
template<>
EIGEN_STRONG_INLINE int pfirst< Packet4i > (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE float pfirst< Packet4f > (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE double pfirst< Packet2d > (const Packet2d &a)
 
template<>
EIGEN_STRONG_INLINE Packet4i preverse (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE Packet2d preverse (const Packet2d &a)
 
template<>
EIGEN_STRONG_INLINE Packet4f preverse (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE Packet4i pabs< Packet4i > (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE Packet2d pabs< Packet2d > (const Packet2d &a)
 
template<>
EIGEN_STRONG_INLINE Packet4f pabs< Packet4f > (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE int predux< Packet4i > (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE double predux< Packet2d > (const Packet2d &a)
 
template<>
EIGEN_STRONG_INLINE float predux< Packet4f > (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE Packet4i preduxp< Packet4i > (const Packet4i *vecs)
 
template<>
EIGEN_STRONG_INLINE Packet2d preduxp< Packet2d > (const Packet2d *vecs)
 
template<>
EIGEN_STRONG_INLINE Packet4f preduxp< Packet4f > (const Packet4f *vecs)
 
template<>
EIGEN_STRONG_INLINE int predux_mul< Packet4i > (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE double predux_mul< Packet2d > (const Packet2d &a)
 
template<>
EIGEN_STRONG_INLINE float predux_mul< Packet4f > (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE int predux_min< Packet4i > (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE double predux_min< Packet2d > (const Packet2d &a)
 
template<>
EIGEN_STRONG_INLINE float predux_min< Packet4f > (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE int predux_max< Packet4i > (const Packet4i &a)
 
template<>
EIGEN_STRONG_INLINE double predux_max< Packet2d > (const Packet2d &a)
 
template<>
EIGEN_STRONG_INLINE float predux_max< Packet4f > (const Packet4f &a)
 
template<>
EIGEN_STRONG_INLINE Packet4i pblend (const Selector< 4 > &ifPacket, const Packet4i &thenPacket, const Packet4i &elsePacket)
 
template<>
EIGEN_STRONG_INLINE Packet4f pblend (const Selector< 4 > &ifPacket, const Packet4f &thenPacket, const Packet4f &elsePacket)
 
template<>
EIGEN_STRONG_INLINE Packet2d pblend (const Selector< 2 > &ifPacket, const Packet2d &thenPacket, const Packet2d &elsePacket)
 
template<typename DstXprType , typename SrcXprType , typename Functor >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize_if_allowed (DstXprType &dst, const SrcXprType &src, const Functor &)
 
template<typename DstXprType , typename SrcXprType , typename T1 , typename T2 >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize_if_allowed (DstXprType &dst, const SrcXprType &src, const internal::assign_op< T1, T2 > &)
 
template<typename DstXprType , typename SrcXprType , typename Functor >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_dense_assignment_loop (DstXprType &dst, const SrcXprType &src, const Functor &func)
 
template<typename DstXprType , typename SrcXprType >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_dense_assignment_loop (DstXprType &dst, const SrcXprType &src)
 
template<typename Dst , typename Src >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_assignment (Dst &dst, const Src &src)
 
template<typename Dst , typename Src >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_assignment (const Dst &dst, const Src &src)
 
template<typename Dst , typename Src , typename Func >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_assignment (Dst &dst, const Src &src, const Func &func, typename enable_if< evaluator_assume_aliasing< Src >::value, void * >::type=0)
 
template<typename Dst , typename Src , typename Func >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_assignment (Dst &dst, const Src &src, const Func &func, typename enable_if<!evaluator_assume_aliasing< Src >::value, void * >::type=0)
 
template<typename Dst , template< typename > class StorageBase, typename Src , typename Func >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_assignment (NoAlias< Dst, StorageBase > &dst, const Src &src, const Func &func)
 
template<typename Dst , typename Src , typename Func >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_assignment_no_alias (Dst &dst, const Src &src, const Func &func)
 
template<typename Dst , typename Src >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_assignment_no_alias (Dst &dst, const Src &src)
 
template<typename Dst , typename Src , typename Func >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_assignment_no_alias_no_transpose (Dst &dst, const Src &src, const Func &func)
 
template<typename Dst , typename Src >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_assignment_no_alias_no_transpose (Dst &dst, const Src &src)
 
template<typename Dst , typename Src >
void check_for_aliasing (const Dst &dst, const Src &src)
 
template<typename Decomposition >
Decomposition::RealScalar rcond_invmatrix_L1_norm_estimate (const Decomposition &dec)
 
template<typename Decomposition >
Decomposition::RealScalar rcond_estimate_helper (typename Decomposition::RealScalar matrix_norm, const Decomposition &dec)
 Reciprocal condition number estimator.
 
static void check_DenseIndex_is_signed ()
 
template<int Alignment, typename Derived >
static Index first_aligned (const DenseBase< Derived > &m)
 
template<typename Derived >
static Index first_default_aligned (const DenseBase< Derived > &m)
 
template<typename T , int Size>
EIGEN_DEVICE_FUNC void check_static_allocation_size ()
 
template<typename SrcPacket , typename TgtPacket >
EIGEN_DEVICE_FUNC TgtPacket pcast (const SrcPacket &a)
 
template<typename SrcPacket , typename TgtPacket >
EIGEN_DEVICE_FUNC TgtPacket pcast (const SrcPacket &a, const SrcPacket &)
 
template<typename SrcPacket , typename TgtPacket >
EIGEN_DEVICE_FUNC TgtPacket pcast (const SrcPacket &a, const SrcPacket &, const SrcPacket &, const SrcPacket &)
 
template<typename Packet >
EIGEN_DEVICE_FUNC Packet padd (const Packet &a, const Packet &b)
 
template<typename Packet >
EIGEN_DEVICE_FUNC Packet psub (const Packet &a, const Packet &b)
 
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pnegate (const Packet &a)
 
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pconj (const Packet &a)
 
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pmul (const Packet &a, const Packet &b)
 
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pdiv (const Packet &a, const Packet &b)
 
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pmin (const Packet &a, const Packet &b)
 
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pmax (const Packet &a, const Packet &b)
 
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pabs (const Packet &a)
 
template<typename Packet >
EIGEN_DEVICE_FUNC Packet parg (const Packet &a)
 
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pand (const Packet &a, const Packet &b)
 
template<typename Packet >
EIGEN_DEVICE_FUNC Packet por (const Packet &a, const Packet &b)
 
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pxor (const Packet &a, const Packet &b)
 
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pandnot (const Packet &a, const Packet &b)
 
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pload (const typename unpacket_traits< Packet >::type *from)
 
template<typename Packet >
EIGEN_DEVICE_FUNC Packet ploadu (const typename unpacket_traits< Packet >::type *from)
 
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pset1 (const typename unpacket_traits< Packet >::type &a)
 
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pload1 (const typename unpacket_traits< Packet >::type *a)
 
template<typename Packet >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet ploaddup (const typename unpacket_traits< Packet >::type *from)
 
template<typename Packet >
EIGEN_DEVICE_FUNC Packet ploadquad (const typename unpacket_traits< Packet >::type *from)
 
template<typename Packet >
EIGEN_DEVICE_FUNC void pbroadcast4 (const typename unpacket_traits< Packet >::type *a, Packet &a0, Packet &a1, Packet &a2, Packet &a3)
 
template<typename Packet >
EIGEN_DEVICE_FUNC void pbroadcast2 (const typename unpacket_traits< Packet >::type *a, Packet &a0, Packet &a1)
 
template<typename Packet >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet plset (const typename unpacket_traits< Packet >::type &a)
 
template<typename Scalar , typename Packet >
EIGEN_DEVICE_FUNC void pstore (Scalar *to, const Packet &from)
 
template<typename Scalar , typename Packet >
EIGEN_DEVICE_FUNC void pstoreu (Scalar *to, const Packet &from)
 
template<typename Scalar , typename Packet >
EIGEN_DEVICE_FUNC Packet pgather (const Scalar *from, Index)
 
template<typename Scalar , typename Packet >
EIGEN_DEVICE_FUNC void pscatter (Scalar *to, const Packet &from, Index)
 
template<typename Scalar >
EIGEN_DEVICE_FUNC void prefetch (const Scalar *addr)
 
template<typename Packet >
EIGEN_DEVICE_FUNC unpacket_traits< Packet >::type pfirst (const Packet &a)
 
template<typename Packet >
EIGEN_DEVICE_FUNC Packet preduxp (const Packet *vecs)
 
template<typename Packet >
EIGEN_DEVICE_FUNC unpacket_traits< Packet >::type predux (const Packet &a)
 
template<typename Packet >
EIGEN_DEVICE_FUNC conditional<(unpacket_traits< Packet >::size%8)==0, typename unpacket_traits< Packet >::half, Packet >::type predux_downto4 (const Packet &a)
 
template<typename Packet >
EIGEN_DEVICE_FUNC unpacket_traits< Packet >::type predux_mul (const Packet &a)
 
template<typename Packet >
EIGEN_DEVICE_FUNC unpacket_traits< Packet >::type predux_min (const Packet &a)
 
template<typename Packet >
EIGEN_DEVICE_FUNC unpacket_traits< Packet >::type predux_max (const Packet &a)
 
template<typename Packet >
EIGEN_DEVICE_FUNC Packet preverse (const Packet &a)
 
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pcplxflip (const Packet &a)
 
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet psin (const Packet &a)
 
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pcos (const Packet &a)
 
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet ptan (const Packet &a)
 
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pasin (const Packet &a)
 
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pacos (const Packet &a)
 
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet patan (const Packet &a)
 
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet psinh (const Packet &a)
 
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pcosh (const Packet &a)
 
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet ptanh (const Packet &a)
 
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pexp (const Packet &a)
 
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet plog (const Packet &a)
 
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet plog1p (const Packet &a)
 
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet plog10 (const Packet &a)
 
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet psqrt (const Packet &a)
 
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet prsqrt (const Packet &a)
 
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pround (const Packet &a)
 
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pfloor (const Packet &a)
 
template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pceil (const Packet &a)
 
template<typename Packet >
void pstore1 (typename unpacket_traits< Packet >::type *to, const typename unpacket_traits< Packet >::type &a)
 
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pmadd (const Packet &a, const Packet &b, const Packet &c)
 
template<typename Packet , int Alignment>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet ploadt (const typename unpacket_traits< Packet >::type *from)
 
template<typename Scalar , typename Packet , int Alignment>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void pstoret (Scalar *to, const Packet &from)
 
template<typename Packet , int LoadMode>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet ploadt_ro (const typename unpacket_traits< Packet >::type *from)
 
template<int Offset, typename PacketType >
void palign (PacketType &first, const PacketType &second)
 
template<>
std::complex< float > pmul (const std::complex< float > &a, const std::complex< float > &b)
 
template<>
std::complex< double > pmul (const std::complex< double > &a, const std::complex< double > &b)
 
template<typename Packet >
EIGEN_DEVICE_FUNC void ptranspose (PacketBlock< Packet, 1 > &)
 
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pblend (const Selector< unpacket_traits< Packet >::size > &ifPacket, const Packet &thenPacket, const Packet &elsePacket)
 
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pinsertfirst (const Packet &a, typename unpacket_traits< Packet >::type b)
 
template<typename Packet >
EIGEN_DEVICE_FUNC Packet pinsertlast (const Packet &a, typename unpacket_traits< Packet >::type b)
 
template<typename Derived >
std::ostream & print_matrix (std::ostream &s, const Derived &_m, const IOFormat &fmt)
 
template<typename OldType , typename NewType >
EIGEN_DEVICE_FUNC NewType cast (const OldType &x)
 
template<typename Scalar >
 EIGEN_MATHFUNC_RETVAL (random, Scalar) random(const Scalar &x
 
template<typename Scalar >
 EIGEN_MATHFUNC_RETVAL (random, Scalar) random()
 
template<typename T >
EIGEN_DEVICE_FUNC internal::enable_if< internal::is_integral< T >::value, bool >::type isnan_impl (const T &)
 
template<typename T >
EIGEN_DEVICE_FUNC internal::enable_if< internal::is_integral< T >::value, bool >::type isinf_impl (const T &)
 
template<typename T >
EIGEN_DEVICE_FUNC internal::enable_if< internal::is_integral< T >::value, bool >::type isfinite_impl (const T &)
 
template<typename T >
EIGEN_DEVICE_FUNC internal::enable_if<(!internal::is_integral< T >::value)&&(!NumTraits< T >::IsComplex), bool >::type isfinite_impl (const T &x)
 
template<typename T >
EIGEN_DEVICE_FUNC internal::enable_if<(!internal::is_integral< T >::value)&&(!NumTraits< T >::IsComplex), bool >::type isinf_impl (const T &x)
 
template<typename T >
EIGEN_DEVICE_FUNC internal::enable_if<(!internal::is_integral< T >::value)&&(!NumTraits< T >::IsComplex), bool >::type isnan_impl (const T &x)
 
template<typename T >
EIGEN_DEVICE_FUNC bool isfinite_impl (const std::complex< T > &x)
 
template<typename T >
EIGEN_DEVICE_FUNC bool isnan_impl (const std::complex< T > &x)
 
template<typename T >
EIGEN_DEVICE_FUNC bool isinf_impl (const std::complex< T > &x)
 
template<typename T >
T generic_fast_tanh_float (const T &a_x)
 
template<typename Scalar , typename OtherScalar >
EIGEN_DEVICE_FUNC bool isMuchSmallerThan (const Scalar &x, const OtherScalar &y, const typename NumTraits< Scalar >::Real &precision=NumTraits< Scalar >::dummy_precision())
 
template<typename Scalar >
EIGEN_DEVICE_FUNC bool isApprox (const Scalar &x, const Scalar &y, const typename NumTraits< Scalar >::Real &precision=NumTraits< Scalar >::dummy_precision())
 
template<typename Scalar >
EIGEN_DEVICE_FUNC bool isApproxOrLessThan (const Scalar &x, const Scalar &y, const typename NumTraits< Scalar >::Real &precision=NumTraits< Scalar >::dummy_precision())
 
template<typename RealScalar >
EIGEN_STRONG_INLINE RealScalar positive_real_hypot (const RealScalar &x, const RealScalar &y)
 
 EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT (assign_op, scalar_sum_op, add_assign_op)
 
 EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT (add_assign_op, scalar_sum_op, add_assign_op)
 
 EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT (sub_assign_op, scalar_sum_op, sub_assign_op)
 
 EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT (assign_op, scalar_difference_op, sub_assign_op)
 
 EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT (add_assign_op, scalar_difference_op, sub_assign_op)
 
 EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT (sub_assign_op, scalar_difference_op, add_assign_op)
 
template<typename Dst , typename Lhs , typename Rhs , typename Func >
void outer_product_selector_run (Dst &dst, const Lhs &lhs, const Rhs &rhs, const Func &func, const false_type &)
 
template<typename Dst , typename Lhs , typename Rhs , typename Func >
void outer_product_selector_run (Dst &dst, const Lhs &lhs, const Rhs &rhs, const Func &func, const true_type &)
 
std::ptrdiff_t manage_caching_sizes_helper (std::ptrdiff_t a, std::ptrdiff_t b)
 
void manage_caching_sizes (Action action, std::ptrdiff_t *l1, std::ptrdiff_t *l2, std::ptrdiff_t *l3)
 
template<typename LhsScalar , typename RhsScalar , int KcFactor, typename Index >
void evaluateProductBlockingSizesHeuristic (Index &k, Index &m, Index &n, Index num_threads=1)
 
template<typename Index >
bool useSpecificBlockingSizes (Index &k, Index &m, Index &n)
 
template<typename LhsScalar , typename RhsScalar , int KcFactor, typename Index >
void computeProductBlockingSizes (Index &k, Index &m, Index &n, Index num_threads=1)
 Computes the blocking parameters for a m x k times k x n matrix product.
 
template<typename LhsScalar , typename RhsScalar , typename Index >
void computeProductBlockingSizes (Index &k, Index &m, Index &n, Index num_threads=1)
 
template<typename CJ , typename A , typename B , typename C , typename T >
EIGEN_STRONG_INLINE void gebp_madd (const CJ &cj, A &a, B &b, C &c, T &t)
 
template<typename Packet >
DoublePacket< Packet > padd (const DoublePacket< Packet > &a, const DoublePacket< Packet > &b)
 
template<typename Packet >
const DoublePacket< Packet > & predux_downto4 (const DoublePacket< Packet > &a)
 
void manage_multi_threading (Action action, int *v)
 
template<bool Condition, typename Functor , typename Index >
void parallelize_gemm (const Functor &func, Index rows, Index cols, Index depth, bool transpose)
 
template<typename ExpressionType , typename Scalar >
void stable_norm_kernel (const ExpressionType &bl, Scalar &ssq, Scalar &scale, Scalar &invScale)
 
template<typename Derived >
NumTraits< typename traits< Derived >::Scalar >::Real blueNorm_impl (const EigenBase< Derived > &_vec)
 
template<int Mode, bool SetOpposite, typename DstXprType , typename SrcXprType , typename Functor >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_triangular_assignment_loop (DstXprType &dst, const SrcXprType &src, const Functor &func)
 
template<int Mode, bool SetOpposite, typename DstXprType , typename SrcXprType >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_triangular_assignment_loop (DstXprType &dst, const SrcXprType &src)
 
template<typename T >
const T::Scalar * extract_data (const T &m)
 
void assert_fail (const char *condition, const char *function, const char *file, int line)
 
template<typename T >
EIGEN_DEVICE_FUNC void ignore_unused_variable (const T &)
 
EIGEN_DEVICE_FUNC void throw_std_bad_alloc ()
 
void * handmade_aligned_malloc (std::size_t size)
 
void handmade_aligned_free (void *ptr)
 
void * handmade_aligned_realloc (void *ptr, std::size_t size, std::size_t=0)
 
EIGEN_DEVICE_FUNC void check_that_malloc_is_allowed ()
 
EIGEN_DEVICE_FUNC void * aligned_malloc (std::size_t size)
 
EIGEN_DEVICE_FUNC void aligned_free (void *ptr)
 
void * aligned_realloc (void *ptr, std::size_t new_size, std::size_t old_size)
 
template<bool Align>
EIGEN_DEVICE_FUNC void * conditional_aligned_malloc (std::size_t size)
 
template<>
EIGEN_DEVICE_FUNC void * conditional_aligned_malloc< false > (std::size_t size)
 
template<bool Align>
EIGEN_DEVICE_FUNC void conditional_aligned_free (void *ptr)
 
template<>
EIGEN_DEVICE_FUNC void conditional_aligned_free< false > (void *ptr)
 
template<bool Align>
void * conditional_aligned_realloc (void *ptr, std::size_t new_size, std::size_t old_size)
 
template<>
void * conditional_aligned_realloc< false > (void *ptr, std::size_t new_size, std::size_t)
 
template<typename T >
EIGEN_DEVICE_FUNC void destruct_elements_of_array (T *ptr, std::size_t size)
 
template<typename T >
EIGEN_DEVICE_FUNC T * construct_elements_of_array (T *ptr, std::size_t size)
 
template<typename T >
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void check_size_for_overflow (std::size_t size)
 
template<typename T >
EIGEN_DEVICE_FUNC T * aligned_new (std::size_t size)
 
template<typename T , bool Align>
EIGEN_DEVICE_FUNC T * conditional_aligned_new (std::size_t size)
 
template<typename T >
EIGEN_DEVICE_FUNC void aligned_delete (T *ptr, std::size_t size)
 
template<typename T , bool Align>
EIGEN_DEVICE_FUNC void conditional_aligned_delete (T *ptr, std::size_t size)
 
template<typename T , bool Align>
EIGEN_DEVICE_FUNC T * conditional_aligned_realloc_new (T *pts, std::size_t new_size, std::size_t old_size)
 
template<typename T , bool Align>
EIGEN_DEVICE_FUNC T * conditional_aligned_new_auto (std::size_t size)
 
template<typename T , bool Align>
T * conditional_aligned_realloc_new_auto (T *pts, std::size_t new_size, std::size_t old_size)
 
template<typename T , bool Align>
EIGEN_DEVICE_FUNC void conditional_aligned_delete_auto (T *ptr, std::size_t size)
 
template<int Alignment, typename Scalar , typename Index >
EIGEN_DEVICE_FUNC Index first_aligned (const Scalar *array, Index size)
 
template<typename Scalar , typename Index >
EIGEN_DEVICE_FUNC Index first_default_aligned (const Scalar *array, Index size)
 
template<typename Index >
Index first_multiple (Index size, Index base)
 
template<typename T >
EIGEN_DEVICE_FUNC void smart_copy (const T *start, const T *end, T *target)
 
template<typename T >
void smart_memmove (const T *start, const T *end, T *target)
 
template<typename T >
void swap (scoped_array< T > &a, scoped_array< T > &b)
 
void queryCacheSizes (int &l1, int &l2, int &l3)
 
int queryL1CacheSize ()
 
int queryTopLevelCacheSize ()
 
template<typename T >
const T * return_ptr ()
 
template<typename IndexDest , typename IndexSrc >
EIGEN_DEVICE_FUNC IndexDest convert_index (const IndexSrc &idx)
 
template<typename T >
EIGEN_DEVICE_FUNC T * const_cast_ptr (const T *ptr)
 
template<typename T1 , typename T2 >
bool is_same_dense (const T1 &mat1, const T2 &mat2, typename enable_if< has_direct_access< T1 >::ret &&has_direct_access< T2 >::ret, T1 >::type *=0)
 
template<typename T1 , typename T2 >
bool is_same_dense (const T1 &, const T2 &, typename enable_if<!(has_direct_access< T1 >::ret &&has_direct_access< T2 >::ret), T1 >::type *=0)
 
 EIGEN_MEMBER_FUNCTOR (squaredNorm, Size *NumTraits< Scalar >::MulCost+(Size-1) *NumTraits< Scalar >::AddCost)
 
 EIGEN_MEMBER_FUNCTOR (norm,(Size+5) *NumTraits< Scalar >::MulCost+(Size-1) *NumTraits< Scalar >::AddCost)
 
 EIGEN_MEMBER_FUNCTOR (stableNorm,(Size+5) *NumTraits< Scalar >::MulCost+(Size-1) *NumTraits< Scalar >::AddCost)
 
 EIGEN_MEMBER_FUNCTOR (blueNorm,(Size+5) *NumTraits< Scalar >::MulCost+(Size-1) *NumTraits< Scalar >::AddCost)
 
 EIGEN_MEMBER_FUNCTOR (hypotNorm,(Size-1) *functor_traits< scalar_hypot_op< Scalar > >::Cost)
 
 EIGEN_MEMBER_FUNCTOR (sum,(Size-1) *NumTraits< Scalar >::AddCost)
 
 EIGEN_MEMBER_FUNCTOR (mean,(Size-1) *NumTraits< Scalar >::AddCost+NumTraits< Scalar >::MulCost)
 
 EIGEN_MEMBER_FUNCTOR (minCoeff,(Size-1) *NumTraits< Scalar >::AddCost)
 
 EIGEN_MEMBER_FUNCTOR (maxCoeff,(Size-1) *NumTraits< Scalar >::AddCost)
 
 EIGEN_MEMBER_FUNCTOR (all,(Size-1) *NumTraits< Scalar >::AddCost)
 
 EIGEN_MEMBER_FUNCTOR (any,(Size-1) *NumTraits< Scalar >::AddCost)
 
 EIGEN_MEMBER_FUNCTOR (count,(Size-1) *NumTraits< Scalar >::AddCost)
 
 EIGEN_MEMBER_FUNCTOR (prod,(Size-1) *NumTraits< Scalar >::MulCost)
 
template<typename MatrixType , typename DiagType , typename SubDiagType >
ComputationInfo computeFromTridiagonal_impl (DiagType &diag, SubDiagType &subdiag, const Index maxIterations, bool computeEigenvectors, MatrixType &eivec)
 
template<int StorageOrder, typename RealScalar , typename Scalar , typename Index >
static EIGEN_DEVICE_FUNC void tridiagonal_qr_step (RealScalar *diag, RealScalar *subdiag, Index start, Index end, Scalar *matrixQ, Index n)
 
template<typename MatrixType , typename CoeffVectorType >
void tridiagonalization_inplace (MatrixType &matA, CoeffVectorType &hCoeffs)
 
template<typename MatrixType , typename DiagonalType , typename SubDiagonalType >
void tridiagonalization_inplace (MatrixType &mat, DiagonalType &diag, SubDiagonalType &subdiag, bool extractQ)
 Performs a full tridiagonalization in place.
 
template<typename Scalar , int Dim>
static EIGEN_DEVICE_FUNC Matrix< Scalar, 2, 2 > toRotationMatrix (const Scalar &s)
 
template<typename Scalar , int Dim, typename OtherDerived >
static EIGEN_DEVICE_FUNC Matrix< Scalar, Dim, Dim > toRotationMatrix (const RotationBase< OtherDerived, Dim > &r)
 
template<typename Scalar , int Dim, typename OtherDerived >
static EIGEN_DEVICE_FUNC const MatrixBase< OtherDerived > & toRotationMatrix (const MatrixBase< OtherDerived > &mat)
 
template<typename TriangularFactorType , typename VectorsType , typename CoeffsType >
void make_block_householder_triangular_factor (TriangularFactorType &triFactor, const VectorsType &vectors, const CoeffsType &hCoeffs)
 
template<typename MatrixType , typename VectorsType , typename CoeffsType >
void apply_block_householder_on_the_left (MatrixType &mat, const VectorsType &vectors, const CoeffsType &hCoeffs, bool forward)
 
template<typename MatrixType , typename Rhs , typename Dest , typename Preconditioner >
bool bicgstab (const MatrixType &mat, const Rhs &rhs, Dest &x, const Preconditioner &precond, Index &iters, typename Dest::RealScalar &tol_error)
 
template<typename MatrixType , typename Rhs , typename Dest , typename Preconditioner >
EIGEN_DONT_INLINE void conjugate_gradient (const MatrixType &mat, const Rhs &rhs, Dest &x, const Preconditioner &precond, Index &iters, typename Dest::RealScalar &tol_error)
 
template<typename VectorV , typename VectorI >
Index QuickSplit (VectorV &row, VectorI &ind, Index ncut)
 
template<typename MatrixType , typename Rhs , typename Dest , typename Preconditioner >
EIGEN_DONT_INLINE void least_square_conjugate_gradient (const MatrixType &mat, const Rhs &rhs, Dest &x, const Preconditioner &precond, Index &iters, typename Dest::RealScalar &tol_error)
 
template<typename VectorX , typename VectorY , typename OtherScalar >
void apply_rotation_in_the_plane (DenseBase< VectorX > &xpr_x, DenseBase< VectorY > &xpr_y, const JacobiRotation< OtherScalar > &j)
 
template<typename Derived >
const Derived::Scalar bruteforce_det3_helper (const MatrixBase< Derived > &matrix, int a, int b, int c)
 
template<typename Derived >
const Derived::Scalar bruteforce_det4_helper (const MatrixBase< Derived > &matrix, int j, int k, int m, int n)
 
template<typename MatrixType , typename ResultType >
EIGEN_DEVICE_FUNC void compute_inverse_size2_helper (const MatrixType &matrix, const typename ResultType::Scalar &invdet, ResultType &result)
 
template<typename MatrixType , int i, int j>
EIGEN_DEVICE_FUNC MatrixType::Scalar cofactor_3x3 (const MatrixType &m)
 
template<typename MatrixType , typename ResultType >
EIGEN_DEVICE_FUNC void compute_inverse_size3_helper (const MatrixType &matrix, const typename ResultType::Scalar &invdet, const Matrix< typename ResultType::Scalar, 3, 1 > &cofactors_col0, ResultType &result)
 
template<typename Derived >
EIGEN_DEVICE_FUNC const Derived::Scalar general_det3_helper (const MatrixBase< Derived > &matrix, int i1, int i2, int i3, int j1, int j2, int j3)
 
template<typename MatrixType , int i, int j>
EIGEN_DEVICE_FUNC MatrixType::Scalar cofactor_4x4 (const MatrixType &matrix)
 
template<typename MatrixType , typename TranspositionType >
void partial_lu_inplace (MatrixType &lu, TranspositionType &row_transpositions, typename TranspositionType::StorageIndex &nb_transpositions)
 
template<typename MatrixType , typename RealScalar , typename Index >
void real_2x2_jacobi_svd (const MatrixType &matrix, Index p, Index q, JacobiRotation< RealScalar > *j_left, JacobiRotation< RealScalar > *j_right)
 
template<typename T >
T amd_flip (const T &i)
 
template<typename T >
T amd_unflip (const T &i)
 
template<typename T0 , typename T1 >
bool amd_marked (const T0 *w, const T1 &j)
 
template<typename T0 , typename T1 >
void amd_mark (const T0 *w, const T1 &j)
 
template<typename StorageIndex >
static StorageIndex cs_wclear (StorageIndex mark, StorageIndex lemax, StorageIndex *w, StorageIndex n)
 
template<typename StorageIndex >
StorageIndex cs_tdfs (StorageIndex j, StorageIndex k, StorageIndex *head, const StorageIndex *next, StorageIndex *post, StorageIndex *stack)
 
template<typename Scalar , typename StorageIndex >
void minimum_degree_ordering (SparseMatrix< Scalar, ColMajor, StorageIndex > &C, PermutationMatrix< Dynamic, Dynamic, StorageIndex > &perm)
 
template<typename MatrixType >
void ordering_helper_at_plus_a (const MatrixType &A, MatrixType &symmat)
 
void eigen_pastix (pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, float *vals, int *perm, int *invp, float *x, int nbrhs, int *iparm, double *dparm)
 
void eigen_pastix (pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, double *vals, int *perm, int *invp, double *x, int nbrhs, int *iparm, double *dparm)
 
void eigen_pastix (pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex< float > *vals, int *perm, int *invp, std::complex< float > *x, int nbrhs, int *iparm, double *dparm)
 
void eigen_pastix (pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex< double > *vals, int *perm, int *invp, std::complex< double > *x, int nbrhs, int *iparm, double *dparm)
 
template<typename MatrixType >
void c_to_fortran_numbering (MatrixType &mat)
 
template<typename MatrixType >
void fortran_to_c_numbering (MatrixType &mat)
 
template<typename MatrixQR , typename HCoeffs >
void householder_qr_inplace_unblocked (MatrixQR &mat, HCoeffs &hCoeffs, typename MatrixQR::Scalar *tempData=0)
 
template<typename Lhs , typename Rhs , typename ResultType >
static void conservative_sparse_sparse_product_impl (const Lhs &lhs, const Rhs &rhs, ResultType &res, bool sortedInsertion=false)
 
template<typename Lhs , typename Rhs , typename ResultType >
static void sparse_sparse_to_dense_product_impl (const Lhs &lhs, const Rhs &rhs, ResultType &res)
 
template<typename DstXprType , typename SrcXprType >
void assign_sparse_to_sparse (DstXprType &dst, const SrcXprType &src)
 
template<typename Index , typename IndexVector >
Index etree_find (Index i, IndexVector &pp)
 
template<typename MatrixType , typename IndexVector >
int coletree (const MatrixType &mat, IndexVector &parent, IndexVector &firstRowElt, typename MatrixType::StorageIndex *perm=0)
 
template<typename IndexVector >
void nr_etdfs (typename IndexVector::Scalar n, IndexVector &parent, IndexVector &first_kid, IndexVector &next_kid, IndexVector &post, typename IndexVector::Scalar postnum)
 
template<typename IndexVector >
void treePostorder (typename IndexVector::Scalar n, IndexVector &parent, IndexVector &post)
 Post order a tree.
 
template<typename SparseLhsType , typename DenseRhsType , typename DenseResType , typename AlphaType >
void sparse_time_dense_product (const SparseLhsType &lhs, const DenseRhsType &rhs, DenseResType &res, const AlphaType &alpha)
 
template<typename InputIterator , typename SparseMatrixType , typename DupFunctor >
void set_from_triplets (const InputIterator &begin, const InputIterator &end, SparseMatrixType &mat, DupFunctor dup_func)
 
template<int SrcMode, int DstMode, typename MatrixType , int DestOrder>
void permute_symm_to_symm (const MatrixType &mat, SparseMatrix< typename MatrixType::Scalar, DestOrder, typename MatrixType::StorageIndex > &_dest, const typename MatrixType::StorageIndex *perm=0)
 
template<int Mode, typename MatrixType , int DestOrder>
void permute_symm_to_fullsymm (const MatrixType &mat, SparseMatrix< typename MatrixType::Scalar, DestOrder, typename MatrixType::StorageIndex > &_dest, const typename MatrixType::StorageIndex *perm=0)
 
template<int Mode, typename SparseLhsType , typename DenseRhsType , typename DenseResType , typename AlphaType >
void sparse_selfadjoint_time_dense_product (const SparseLhsType &lhs, const DenseRhsType &rhs, DenseResType &res, const AlphaType &alpha)
 
template<int _SrcMode, int _DstMode, typename MatrixType , int DstOrder>
void permute_symm_to_symm (const MatrixType &mat, SparseMatrix< typename MatrixType::Scalar, DstOrder, typename MatrixType::StorageIndex > &_dest, const typename MatrixType::StorageIndex *perm)
 
template<typename Decomposition , typename Rhs , typename Dest >
enable_if< Rhs::ColsAtCompileTime!=1 &&Dest::ColsAtCompileTime!=1 >::type solve_sparse_through_dense_panels (const Decomposition &dec, const Rhs &rhs, Dest &dest)
 
template<typename Decomposition , typename Rhs , typename Dest >
enable_if< Rhs::ColsAtCompileTime==1||Dest::ColsAtCompileTime==1 >::type solve_sparse_through_dense_panels (const Decomposition &dec, const Rhs &rhs, Dest &dest)
 
template<typename Lhs , typename Rhs , typename ResultType >
static void sparse_sparse_product_with_pruning_impl (const Lhs &lhs, const Rhs &rhs, ResultType &res, const typename ResultType::RealScalar &tolerance)
 
template<typename Scalar >
EIGEN_DONT_INLINE void sparselu_gemm (Index m, Index n, Index d, const Scalar *A, Index lda, const Scalar *B, Index ldb, Scalar *C, Index ldc)
 
Index LUnumTempV (Index &m, Index &w, Index &t, Index &b)
 
template<typename Scalar >
Index LUTempSpace (Index &m, Index &w)
 
template<typename MatrixType >
SluMatrix asSluMatrix (MatrixType &mat)
 
template<typename Scalar , int Flags, typename Index >
MappedSparseMatrix< Scalar, Flags, Index > map_superlu (SluMatrix &sluMat)
 
template<typename MatrixType >
void upperbidiagonalization_inplace_unblocked (MatrixType &mat, typename MatrixType::RealScalar *diagonal, typename MatrixType::RealScalar *upper_diagonal, typename MatrixType::Scalar *tempData=0)
 
template<typename MatrixType >
void upperbidiagonalization_blocked_helper (MatrixType &A, typename MatrixType::RealScalar *diagonal, typename MatrixType::RealScalar *upper_diagonal, Index bs, Ref< Matrix< typename MatrixType::Scalar, Dynamic, Dynamic, traits< MatrixType >::Flags &RowMajorBit > > X, Ref< Matrix< typename MatrixType::Scalar, Dynamic, Dynamic, traits< MatrixType >::Flags &RowMajorBit > > Y)
 
template<typename MatrixType , typename BidiagType >
void upperbidiagonalization_inplace_blocked (MatrixType &A, BidiagType &bidiagonal, Index maxBlockSize=32, typename MatrixType::Scalar *=0)
 
template<typename Scalar , typename IndexType >
bool GetMarketLine (std::stringstream &line, IndexType &M, IndexType &N, IndexType &i, IndexType &j, Scalar &value)
 
template<typename Scalar , typename IndexType >
bool GetMarketLine (std::stringstream &line, IndexType &M, IndexType &N, IndexType &i, IndexType &j, std::complex< Scalar > &value)
 
template<typename RealScalar >
void GetVectorElt (const std::string &line, RealScalar &val)
 
template<typename RealScalar >
void GetVectorElt (const std::string &line, std::complex< RealScalar > &val)
 
template<typename Scalar >
void putMarketHeader (std::string &header, int sym)
 
template<typename Scalar >
void PutMatrixElt (Scalar value, int row, int col, std::ofstream &out)
 
template<typename Scalar >
void PutMatrixElt (std::complex< Scalar > value, int row, int col, std::ofstream &out)
 
template<typename Scalar >
void putVectorElt (Scalar value, std::ofstream &out)
 
template<typename Scalar >
void putVectorElt (std::complex< Scalar > value, std::ofstream &out)
 

Variables

static Packet4ui p4ui_CONJ_XOR = vec_mergeh((Packet4ui)p4i_ZERO, (Packet4ui)p4f_MZERO)
 
static Packet4f p4f_MZERO = (Packet4f) vec_sl((Packet4ui)p4i_MINUS1, (Packet4ui)p4i_MINUS1)
 
static Packet4f p4f_ONE = vec_ctf(p4i_ONE, 0)
 
static Packet4f p4f_COUNTDOWN = { 0.0, 1.0, 2.0, 3.0 }
 
static Packet4i p4i_COUNTDOWN = { 0, 1, 2, 3 }
 
static Packet16uc p16uc_REVERSE32 = { 12,13,14,15, 8,9,10,11, 4,5,6,7, 0,1,2,3 }
 
static Packet16uc p16uc_DUPLICATE32_HI = { 0,1,2,3, 0,1,2,3, 4,5,6,7, 4,5,6,7 }
 
static Packet16uc p16uc_FORWARD = p16uc_REVERSE32
 
static Packet16uc p16uc_REVERSE64 = { 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 }
 
static Packet16uc p16uc_PSET32_WODD = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 1), (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 3), 8)
 
static Packet16uc p16uc_PSET32_WEVEN = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 0), (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 2), 8)
 
static Packet16uc p16uc_HALF64_0_16 = vec_sld(vec_splat((Packet16uc) vec_abs(p4i_MINUS16), 0), (Packet16uc)p4i_ZERO, 8)
 
static Packet16uc p16uc_PSET64_HI = (Packet16uc) vec_mergeh((Packet4ui)p16uc_PSET32_WODD, (Packet4ui)p16uc_PSET32_WEVEN)
 
static Packet16uc p16uc_PSET64_LO = (Packet16uc) vec_mergel((Packet4ui)p16uc_PSET32_WODD, (Packet4ui)p16uc_PSET32_WEVEN)
 
static Packet16uc p16uc_TRANSPOSE64_HI = p16uc_PSET64_HI + p16uc_HALF64_0_16
 
static Packet16uc p16uc_TRANSPOSE64_LO = p16uc_PSET64_LO + p16uc_HALF64_0_16
 
static Packet16uc p16uc_COMPLEX32_REV = vec_sld(p16uc_REVERSE32, p16uc_REVERSE32, 8)
 
static Packet16uc p16uc_COMPLEX32_REV2 = vec_sld(p16uc_PSET64_HI, p16uc_PSET64_LO, 8)
 
static Packet2ul p2ul_CONJ_XOR1 = (Packet2ul) vec_sld((Packet4ui) p2d_ZERO_, (Packet4ui) p2l_ZERO, 8)
 
static Packet2ul p2ul_CONJ_XOR2 = (Packet2ul) vec_sld((Packet4ui) p2l_ZERO, (Packet4ui) p2d_ZERO_, 8)
 
static Packet2d p2d_ONE = { 1.0, 1.0 }
 
static Packet2d p2d_ZERO_ = { -0.0, -0.0 }
 
static Packet4i p4i_COUNTDOWN = { 0, 1, 2, 3 }
 
static Packet4f p4f_COUNTDOWN = { 0.0, 1.0, 2.0, 3.0 }
 
static Packet2d p2d_COUNTDOWN = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet16uc>(p2d_ZERO), reinterpret_cast<Packet16uc>(p2d_ONE), 8))
 
static Packet16uc p16uc_PSET64_HI = { 0,1,2,3, 4,5,6,7, 0,1,2,3, 4,5,6,7 }
 
static Packet16uc p16uc_DUPLICATE32_HI = { 0,1,2,3, 0,1,2,3, 4,5,6,7, 4,5,6,7 }
 
static Packet16uc p16uc_FORWARD = { 0,1,2,3, 4,5,6,7, 8,9,10,11, 12,13,14,15 }
 
static Packet16uc p16uc_REVERSE32 = { 12,13,14,15, 8,9,10,11, 4,5,6,7, 0,1,2,3 }
 
static Packet16uc p16uc_REVERSE64 = { 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 }
 
static Packet16uc p16uc_PSET32_WODD = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 0), (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 2), 8)
 
static Packet16uc p16uc_PSET32_WEVEN = vec_sld(p16uc_DUPLICATE32_HI, (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 3), 8)
 
static Packet16uc p16uc_PSET64_LO = (Packet16uc) vec_mergel((Packet4ui)p16uc_PSET32_WODD, (Packet4ui)p16uc_PSET32_WEVEN)
 
static Packet16uc p16uc_TRANSPOSE64_HI = { 0,1,2,3, 4,5,6,7, 16,17,18,19, 20,21,22,23}
 
static Packet16uc p16uc_TRANSPOSE64_LO = { 8,9,10,11, 12,13,14,15, 24,25,26,27, 28,29,30,31}
 
const Scalar & y
 
const std::ptrdiff_t defaultL1CacheSize = 16*1024
 
const std::ptrdiff_t defaultL2CacheSize = 512*1024
 
const std::ptrdiff_t defaultL3CacheSize = 512*1024
 

Class Documentation

◆ Eigen::internal::abs2_retval

struct Eigen::internal::abs2_retval
template<typename Scalar>
struct Eigen::internal::abs2_retval< Scalar >
Class Members
typedef Real type

◆ Eigen::internal::add_const

struct Eigen::internal::add_const
template<typename T>
struct Eigen::internal::add_const< T >
Class Members
typedef const T type

◆ Eigen::internal::add_const< T & >

struct Eigen::internal::add_const< T & >
template<typename T>
struct Eigen::internal::add_const< T & >
Class Members
typedef T & type

◆ Eigen::internal::add_const_on_value_type

struct Eigen::internal::add_const_on_value_type
template<typename T>
struct Eigen::internal::add_const_on_value_type< T >
+ Inheritance diagram for Eigen::internal::add_const_on_value_type< T >:
Class Members
typedef const T type

◆ Eigen::internal::add_const_on_value_type< T & >

struct Eigen::internal::add_const_on_value_type< T & >
template<typename T>
struct Eigen::internal::add_const_on_value_type< T & >
Class Members
typedef T const & type

◆ Eigen::internal::add_const_on_value_type< T * >

struct Eigen::internal::add_const_on_value_type< T * >
template<typename T>
struct Eigen::internal::add_const_on_value_type< T * >
Class Members
typedef T const * type

◆ Eigen::internal::add_const_on_value_type< T *const >

struct Eigen::internal::add_const_on_value_type< T *const >
template<typename T>
struct Eigen::internal::add_const_on_value_type< T *const >
Class Members
typedef T const *const type

◆ Eigen::internal::add_const_on_value_type< T const *const >

struct Eigen::internal::add_const_on_value_type< T const *const >
template<typename T>
struct Eigen::internal::add_const_on_value_type< T const *const >
Class Members
typedef T const *const type

◆ Eigen::internal::add_const_on_value_type_if_arithmetic

struct Eigen::internal::add_const_on_value_type_if_arithmetic
template<typename T>
struct Eigen::internal::add_const_on_value_type_if_arithmetic< T >
Class Members
typedef conditional< is_arithmetic< T >::value, T, add_const_on_value_type< T >::type >::type type

◆ Eigen::internal::always_void

struct Eigen::internal::always_void
template<typename T>
struct Eigen::internal::always_void< T >
Class Members
typedef void type

◆ Eigen::internal::arg_retval

struct Eigen::internal::arg_retval
template<typename Scalar>
struct Eigen::internal::arg_retval< Scalar >
Class Members
typedef Real type

◆ Eigen::internal::assign_op< DstScalar, void >

struct Eigen::internal::assign_op< DstScalar, void >
template<typename DstScalar>
struct Eigen::internal::assign_op< DstScalar, void >

◆ Eigen::internal::Assignment

struct Eigen::internal::Assignment
template<typename DstXprType, typename SrcXprType, typename Functor, typename Kind = typename AssignmentKind< typename evaluator_traits<DstXprType>::Shape , typename evaluator_traits<SrcXprType>::Shape >::Kind, typename EnableIf = void>
struct Eigen::internal::Assignment< DstXprType, SrcXprType, Functor, Kind, EnableIf >

◆ Eigen::internal::AssignmentKind

struct Eigen::internal::AssignmentKind
template<typename, typename>
struct Eigen::internal::AssignmentKind< typename, typename >
Class Members
typedef EigenBase2EigenBase Kind

◆ Eigen::internal::AssignmentKind< DenseShape, BandShape >

struct Eigen::internal::AssignmentKind< DenseShape, BandShape >
Class Members
typedef EigenBase2EigenBase Kind

◆ Eigen::internal::AssignmentKind< DenseShape, DenseShape >

struct Eigen::internal::AssignmentKind< DenseShape, DenseShape >
Class Members
typedef Dense2Dense Kind

◆ Eigen::internal::AssignmentKind< DenseShape, DiagonalShape >

struct Eigen::internal::AssignmentKind< DenseShape, DiagonalShape >
Class Members
typedef Diagonal2Dense Kind

◆ Eigen::internal::AssignmentKind< DenseShape, HomogeneousShape >

struct Eigen::internal::AssignmentKind< DenseShape, HomogeneousShape >
Class Members
typedef Dense2Dense Kind

◆ Eigen::internal::AssignmentKind< DenseShape, PermutationShape >

struct Eigen::internal::AssignmentKind< DenseShape, PermutationShape >
Class Members
typedef EigenBase2EigenBase Kind

◆ Eigen::internal::AssignmentKind< DenseShape, SparseShape >

struct Eigen::internal::AssignmentKind< DenseShape, SparseShape >
Class Members
typedef Sparse2Dense Kind

◆ Eigen::internal::AssignmentKind< DenseShape, SparseTriangularShape >

struct Eigen::internal::AssignmentKind< DenseShape, SparseTriangularShape >
Class Members
typedef Sparse2Dense Kind

◆ Eigen::internal::AssignmentKind< DenseShape, TriangularShape >

struct Eigen::internal::AssignmentKind< DenseShape, TriangularShape >
Class Members
typedef Triangular2Dense Kind

◆ Eigen::internal::AssignmentKind< SparseSelfAdjointShape, SparseShape >

struct Eigen::internal::AssignmentKind< SparseSelfAdjointShape, SparseShape >
Class Members
typedef Sparse2Sparse Kind

◆ Eigen::internal::AssignmentKind< SparseShape, DiagonalShape >

struct Eigen::internal::AssignmentKind< SparseShape, DiagonalShape >
Class Members
typedef Diagonal2Sparse Kind

◆ Eigen::internal::AssignmentKind< SparseShape, SparseSelfAdjointShape >

struct Eigen::internal::AssignmentKind< SparseShape, SparseSelfAdjointShape >
Class Members
typedef SparseSelfAdjoint2Sparse Kind

◆ Eigen::internal::AssignmentKind< SparseShape, SparseShape >

struct Eigen::internal::AssignmentKind< SparseShape, SparseShape >
Class Members
typedef Sparse2Sparse Kind

◆ Eigen::internal::AssignmentKind< SparseShape, SparseTriangularShape >

struct Eigen::internal::AssignmentKind< SparseShape, SparseTriangularShape >
Class Members
typedef Sparse2Sparse Kind

◆ Eigen::internal::AssignmentKind< TriangularShape, DenseShape >

struct Eigen::internal::AssignmentKind< TriangularShape, DenseShape >
Class Members
typedef Dense2Triangular Kind

◆ Eigen::internal::AssignmentKind< TriangularShape, TriangularShape >

struct Eigen::internal::AssignmentKind< TriangularShape, TriangularShape >
Class Members
typedef Triangular2Triangular Kind

◆ Eigen::internal::BandShape

struct Eigen::internal::BandShape

◆ Eigen::internal::binary_evaluator

struct Eigen::internal::binary_evaluator
template<typename T, typename LhsKind = typename evaluator_traits<typename T::Lhs>::Kind, typename RhsKind = typename evaluator_traits<typename T::Rhs>::Kind, typename LhsScalar = typename traits<typename T::Lhs>::Scalar, typename RhsScalar = typename traits<typename T::Rhs>::Scalar>
struct Eigen::internal::binary_evaluator< T, LhsKind, RhsKind, LhsScalar, RhsScalar >
+ Inheritance diagram for Eigen::internal::binary_evaluator< T, LhsKind, RhsKind, LhsScalar, RhsScalar >:

◆ Eigen::internal::binary_result_of_select

struct Eigen::internal::binary_result_of_select
template<typename Func, typename ArgType0, typename ArgType1, int SizeOf = sizeof(has_none)>
struct Eigen::internal::binary_result_of_select< Func, ArgType0, ArgType1, SizeOf >
Class Members
typedef type type

◆ Eigen::internal::binary_result_of_select< Func, ArgType0, ArgType1, sizeof(has_std_result_type)>

struct Eigen::internal::binary_result_of_select< Func, ArgType0, ArgType1, sizeof(has_std_result_type)>
template<typename Func, typename ArgType0, typename ArgType1>
struct Eigen::internal::binary_result_of_select< Func, ArgType0, ArgType1, sizeof(has_std_result_type)>
Class Members
typedef result_type type

◆ Eigen::internal::binary_result_of_select< Func, ArgType0, ArgType1, sizeof(has_tr1_result)>

struct Eigen::internal::binary_result_of_select< Func, ArgType0, ArgType1, sizeof(has_tr1_result)>
template<typename Func, typename ArgType0, typename ArgType1>
struct Eigen::internal::binary_result_of_select< Func, ArgType0, ArgType1, sizeof(has_tr1_result)>
Class Members
typedef template type type

◆ Eigen::internal::binary_sparse_evaluator

struct Eigen::internal::binary_sparse_evaluator
template<typename XprType>
struct Eigen::internal::binary_sparse_evaluator< XprType >

◆ Eigen::internal::cast_return_type

struct Eigen::internal::cast_return_type
template<typename XprType, typename CastType>
struct Eigen::internal::cast_return_type< XprType, CastType >
Class Members
typedef type _CastType
typedef Scalar CurrentScalarType
typedef Scalar NewScalarType
typedef conditional< is_same< CurrentScalarType, NewScalarType >::value, const XprType &, CastType >::type type

◆ Eigen::internal::cholmod_configure_matrix

struct Eigen::internal::cholmod_configure_matrix
template<typename Scalar>
struct Eigen::internal::cholmod_configure_matrix< Scalar >

◆ Eigen::internal::compute_inverse_and_det_with_check

struct Eigen::internal::compute_inverse_and_det_with_check
template<typename MatrixType, typename ResultType, int Size = MatrixType::RowsAtCompileTime>
struct Eigen::internal::compute_inverse_and_det_with_check< MatrixType, ResultType, Size >

◆ Eigen::internal::conditional

struct Eigen::internal::conditional
template<bool Condition, typename Then, typename Else>
struct Eigen::internal::conditional< Condition, Then, Else >
+ Inheritance diagram for Eigen::internal::conditional< Condition, Then, Else >:
Class Members
typedef Then type

◆ Eigen::internal::conditional< false, Then, Else >

struct Eigen::internal::conditional< false, Then, Else >
template<typename Then, typename Else>
struct Eigen::internal::conditional< false, Then, Else >
Class Members
typedef Else type

◆ Eigen::internal::conj_if

struct Eigen::internal::conj_if
template<bool Conjugate>
struct Eigen::internal::conj_if< Conjugate >

◆ Eigen::internal::conj_retval

struct Eigen::internal::conj_retval
template<typename Scalar>
struct Eigen::internal::conj_retval< Scalar >
Class Members
typedef Scalar type

◆ Eigen::internal::conservative_sparse_sparse_product_selector

struct Eigen::internal::conservative_sparse_sparse_product_selector
template<typename Lhs, typename Rhs, typename ResultType, int LhsStorageOrder = (traits<Lhs>::Flags&RowMajorBit) ? RowMajor : ColMajor, int RhsStorageOrder = (traits<Rhs>::Flags&RowMajorBit) ? RowMajor : ColMajor, int ResStorageOrder = (traits<ResultType>::Flags&RowMajorBit) ? RowMajor : ColMajor>
struct Eigen::internal::conservative_sparse_sparse_product_selector< Lhs, Rhs, ResultType, LhsStorageOrder, RhsStorageOrder, ResStorageOrder >

◆ Eigen::internal::constructor_without_unaligned_array_assert

struct Eigen::internal::constructor_without_unaligned_array_assert

◆ Eigen::internal::cwise_promote_storage_type

struct Eigen::internal::cwise_promote_storage_type
template<typename A, typename B, typename Functor>
struct Eigen::internal::cwise_promote_storage_type< A, B, Functor >

◆ Eigen::internal::cwise_promote_storage_type< A, A, Functor >

struct Eigen::internal::cwise_promote_storage_type< A, A, Functor >
template<typename A, typename Functor>
struct Eigen::internal::cwise_promote_storage_type< A, A, Functor >
Class Members
typedef A ret

◆ Eigen::internal::cwise_promote_storage_type< A, Dense, Functor >

struct Eigen::internal::cwise_promote_storage_type< A, Dense, Functor >
template<typename A, typename Functor>
struct Eigen::internal::cwise_promote_storage_type< A, Dense, Functor >
Class Members
typedef Dense ret

◆ Eigen::internal::cwise_promote_storage_type< Dense, B, Functor >

struct Eigen::internal::cwise_promote_storage_type< Dense, B, Functor >
template<typename B, typename Functor>
struct Eigen::internal::cwise_promote_storage_type< Dense, B, Functor >
Class Members
typedef Dense ret

◆ Eigen::internal::cwise_promote_storage_type< Dense, Dense, Functor >

struct Eigen::internal::cwise_promote_storage_type< Dense, Dense, Functor >
template<typename Functor>
struct Eigen::internal::cwise_promote_storage_type< Dense, Dense, Functor >
Class Members
typedef Dense ret

◆ Eigen::internal::cwise_promote_storage_type< Dense, Sparse, Functor >

struct Eigen::internal::cwise_promote_storage_type< Dense, Sparse, Functor >
template<typename Functor>
struct Eigen::internal::cwise_promote_storage_type< Dense, Sparse, Functor >
Class Members
typedef Sparse ret

◆ Eigen::internal::cwise_promote_storage_type< Sparse, Dense, Functor >

struct Eigen::internal::cwise_promote_storage_type< Sparse, Dense, Functor >
template<typename Functor>
struct Eigen::internal::cwise_promote_storage_type< Sparse, Dense, Functor >
Class Members
typedef Sparse ret

◆ Eigen::internal::Dense2Dense

struct Eigen::internal::Dense2Dense

◆ Eigen::internal::Dense2Triangular

struct Eigen::internal::Dense2Triangular

◆ Eigen::internal::dense_assignment_loop

struct Eigen::internal::dense_assignment_loop
template<typename Kernel, int Traversal = Kernel::AssignmentTraits::Traversal, int Unrolling = Kernel::AssignmentTraits::Unrolling>
struct Eigen::internal::dense_assignment_loop< Kernel, Traversal, Unrolling >

◆ Eigen::internal::dense_xpr_base

struct Eigen::internal::dense_xpr_base
template<typename Derived, typename XprKind = typename traits<Derived>::XprKind>
struct Eigen::internal::dense_xpr_base< Derived, XprKind >

◆ Eigen::internal::dense_xpr_base< Derived, ArrayXpr >

struct Eigen::internal::dense_xpr_base< Derived, ArrayXpr >
template<typename Derived>
struct Eigen::internal::dense_xpr_base< Derived, ArrayXpr >
Class Members
typedef ArrayBase< Derived > type

◆ Eigen::internal::dense_xpr_base< Derived, MatrixXpr >

struct Eigen::internal::dense_xpr_base< Derived, MatrixXpr >
template<typename Derived>
struct Eigen::internal::dense_xpr_base< Derived, MatrixXpr >
Class Members
typedef MatrixBase< Derived > type

◆ Eigen::internal::Diagonal2Dense

struct Eigen::internal::Diagonal2Dense

◆ Eigen::internal::Diagonal2Sparse

struct Eigen::internal::Diagonal2Sparse

◆ Eigen::internal::DoublePacket

struct Eigen::internal::DoublePacket
template<typename Packet>
struct Eigen::internal::DoublePacket< Packet >
+ Collaboration diagram for Eigen::internal::DoublePacket< Packet >:
Class Members
Packet first
Packet second

◆ Eigen::internal::EigenBase2EigenBase

struct Eigen::internal::EigenBase2EigenBase

◆ Eigen::internal::enable_if

struct Eigen::internal::enable_if
template<bool Condition, typename T = void>
struct Eigen::internal::enable_if< Condition, T >

◆ Eigen::internal::enable_if< true, T >

struct Eigen::internal::enable_if< true, T >
template<typename T>
struct Eigen::internal::enable_if< true, T >
Class Members
typedef T type

◆ Eigen::internal::enable_if_ref

struct Eigen::internal::enable_if_ref
template<typename T, typename Derived>
struct Eigen::internal::enable_if_ref< T, Derived >

◆ Eigen::internal::enable_if_ref< Ref< T >, Derived >

struct Eigen::internal::enable_if_ref< Ref< T >, Derived >
template<typename T, typename Derived>
struct Eigen::internal::enable_if_ref< Ref< T >, Derived >
Class Members
typedef Derived type

◆ Eigen::internal::EnableIf

struct Eigen::internal::EnableIf
template<bool>
struct Eigen::internal::EnableIf< bool >

◆ Eigen::internal::etor_product_coeff_impl

struct Eigen::internal::etor_product_coeff_impl
template<int Traversal, int UnrollingIndex, typename Lhs, typename Rhs, typename RetScalar>
struct Eigen::internal::etor_product_coeff_impl< Traversal, UnrollingIndex, Lhs, Rhs, RetScalar >

◆ Eigen::internal::etor_product_packet_impl

struct Eigen::internal::etor_product_packet_impl
template<int StorageOrder, int UnrollingIndex, typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct Eigen::internal::etor_product_packet_impl< StorageOrder, UnrollingIndex, Lhs, Rhs, Packet, LoadMode >

◆ Eigen::internal::eval

struct Eigen::internal::eval
template<typename T, typename StorageKind = typename traits<T>::StorageKind>
struct Eigen::internal::eval< T, StorageKind >

◆ Eigen::internal::eval< Array< _Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols >, Dense >

struct Eigen::internal::eval< Array< _Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols >, Dense >
template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
struct Eigen::internal::eval< Array< _Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols >, Dense >
Class Members
typedef const Array< _Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols > & type

◆ Eigen::internal::eval< Matrix< _Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols >, Dense >

struct Eigen::internal::eval< Matrix< _Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols >, Dense >
template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
struct Eigen::internal::eval< Matrix< _Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols >, Dense >
Class Members
typedef const Matrix< _Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols > & type

◆ Eigen::internal::eval< T, Dense >

struct Eigen::internal::eval< T, Dense >
template<typename T>
struct Eigen::internal::eval< T, Dense >
Class Members
typedef type type

◆ Eigen::internal::eval< T, DiagonalShape >

struct Eigen::internal::eval< T, DiagonalShape >
template<typename T>
struct Eigen::internal::eval< T, DiagonalShape >
Class Members
typedef type type

◆ Eigen::internal::evaluator_traits< Homogeneous< ArgType, Direction > >

struct Eigen::internal::evaluator_traits< Homogeneous< ArgType, Direction > >
template<typename ArgType, int Direction>
struct Eigen::internal::evaluator_traits< Homogeneous< ArgType, Direction > >
Class Members
typedef Kind Kind
typedef HomogeneousShape Shape

◆ Eigen::internal::evaluator_traits< SelfAdjointView< MatrixType, Mode > >

struct Eigen::internal::evaluator_traits< SelfAdjointView< MatrixType, Mode > >
template<typename MatrixType, unsigned int Mode>
struct Eigen::internal::evaluator_traits< SelfAdjointView< MatrixType, Mode > >
Class Members
typedef Kind Kind
typedef SelfAdjointShape Shape

◆ Eigen::internal::evaluator_traits< SparseQRMatrixQReturnType< SparseQRType > >

struct Eigen::internal::evaluator_traits< SparseQRMatrixQReturnType< SparseQRType > >
template<typename SparseQRType>
struct Eigen::internal::evaluator_traits< SparseQRMatrixQReturnType< SparseQRType > >
Class Members
typedef Kind Kind
typedef MatrixType MatrixType
typedef SparseShape Shape

◆ Eigen::internal::evaluator_traits< SparseSelfAdjointView< MatrixType, Mode > >

struct Eigen::internal::evaluator_traits< SparseSelfAdjointView< MatrixType, Mode > >
template<typename MatrixType, unsigned int Mode>
struct Eigen::internal::evaluator_traits< SparseSelfAdjointView< MatrixType, Mode > >
Class Members
typedef Kind Kind
typedef SparseSelfAdjointShape Shape

◆ Eigen::internal::evaluator_traits< TriangularView< MatrixType, Mode > >

struct Eigen::internal::evaluator_traits< TriangularView< MatrixType, Mode > >
template<typename MatrixType, unsigned int Mode>
struct Eigen::internal::evaluator_traits< TriangularView< MatrixType, Mode > >
Class Members
typedef Kind Kind
typedef glue_shapes< evaluator_traits< MatrixType >::Shape, TriangularShape >::type Shape

◆ Eigen::internal::find_best_packet

struct Eigen::internal::find_best_packet
template<typename T, int Size>
struct Eigen::internal::find_best_packet< T, Size >
Class Members
typedef find_best_packet_helper< Size, packet_traits< T >::type >::type type

◆ Eigen::internal::find_best_packet_helper

struct Eigen::internal::find_best_packet_helper
template<int Size, typename PacketType, bool Stop = Size==Dynamic || (Size%unpacket_traits<PacketType>::size)==0 || is_same<PacketType,typename unpacket_traits<PacketType>::half>::value>
struct Eigen::internal::find_best_packet_helper< Size, PacketType, Stop >

◆ Eigen::internal::find_best_packet_helper< Size, PacketType, false >

struct Eigen::internal::find_best_packet_helper< Size, PacketType, false >
template<int Size, typename PacketType>
struct Eigen::internal::find_best_packet_helper< Size, PacketType, false >
Class Members
typedef find_best_packet_helper< Size, unpacket_traits< PacketType >::half >::type type

◆ Eigen::internal::find_best_packet_helper< Size, PacketType, true >

struct Eigen::internal::find_best_packet_helper< Size, PacketType, true >
template<int Size, typename PacketType>
struct Eigen::internal::find_best_packet_helper< Size, PacketType, true >
Class Members
typedef PacketType type

◆ Eigen::internal::gemm_blocking_space

class Eigen::internal::gemm_blocking_space
template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor = 1, bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic>
class Eigen::internal::gemm_blocking_space< StorageOrder, LhsScalar, RhsScalar, MaxRows, MaxCols, MaxDepth, KcFactor, FiniteAtCompileTime >

◆ Eigen::internal::gemm_pack_lhs

struct Eigen::internal::gemm_pack_lhs
template<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, int StorageOrder, bool Conjugate = false, bool PanelMode = false>
struct Eigen::internal::gemm_pack_lhs< Scalar, Index, DataMapper, Pack1, Pack2, StorageOrder, Conjugate, PanelMode >

◆ Eigen::internal::gemm_pack_rhs

struct Eigen::internal::gemm_pack_rhs
template<typename Scalar, typename Index, typename DataMapper, int nr, int StorageOrder, bool Conjugate = false, bool PanelMode = false>
struct Eigen::internal::gemm_pack_rhs< Scalar, Index, DataMapper, nr, StorageOrder, Conjugate, PanelMode >

◆ Eigen::internal::gemv_dense_selector

struct Eigen::internal::gemv_dense_selector
template<int Side, int StorageOrder, bool BlasCompatible>
struct Eigen::internal::gemv_dense_selector< Side, StorageOrder, BlasCompatible >

◆ Eigen::internal::gemv_static_vector_if

struct Eigen::internal::gemv_static_vector_if
template<typename Scalar, int Size, int MaxSize, bool Cond>
struct Eigen::internal::gemv_static_vector_if< Scalar, Size, MaxSize, Cond >

◆ Eigen::internal::general_matrix_matrix_product

struct Eigen::internal::general_matrix_matrix_product
template<typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs, int ResStorageOrder>
struct Eigen::internal::general_matrix_matrix_product< Index, LhsScalar, LhsStorageOrder, ConjugateLhs, RhsScalar, RhsStorageOrder, ConjugateRhs, ResStorageOrder >

◆ Eigen::internal::general_matrix_matrix_triangular_product

struct Eigen::internal::general_matrix_matrix_triangular_product
template<typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs, int ResStorageOrder, int UpLo, int Version = Specialized>
struct Eigen::internal::general_matrix_matrix_triangular_product< Index, LhsScalar, LhsStorageOrder, ConjugateLhs, RhsScalar, RhsStorageOrder, ConjugateRhs, ResStorageOrder, UpLo, Version >
+ Inheritance diagram for Eigen::internal::general_matrix_matrix_triangular_product< Index, LhsScalar, LhsStorageOrder, ConjugateLhs, RhsScalar, RhsStorageOrder, ConjugateRhs, ResStorageOrder, UpLo, Version >:

◆ Eigen::internal::general_matrix_vector_product

struct Eigen::internal::general_matrix_vector_product
template<typename Index, typename LhsScalar, typename LhsMapper, int LhsStorageOrder, bool ConjugateLhs, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version = Specialized>
struct Eigen::internal::general_matrix_vector_product< Index, LhsScalar, LhsMapper, LhsStorageOrder, ConjugateLhs, RhsScalar, RhsMapper, ConjugateRhs, Version >

◆ Eigen::internal::general_matrix_vector_product_gemv

struct Eigen::internal::general_matrix_vector_product_gemv
template<typename Index, typename LhsScalar, int StorageOrder, bool ConjugateLhs, typename RhsScalar, bool ConjugateRhs>
struct Eigen::internal::general_matrix_vector_product_gemv< Index, LhsScalar, StorageOrder, ConjugateLhs, RhsScalar, ConjugateRhs >

◆ Eigen::internal::generic_matrix_wrapper

class Eigen::internal::generic_matrix_wrapper
template<typename MatrixType, bool MatrixFree = !internal::is_ref_compatible<MatrixType>::value>
class Eigen::internal::generic_matrix_wrapper< MatrixType, MatrixFree >

◆ Eigen::internal::generic_product_impl

struct Eigen::internal::generic_product_impl
template<typename Lhs, typename Rhs, typename LhsShape = typename evaluator_traits<Lhs>::Shape, typename RhsShape = typename evaluator_traits<Rhs>::Shape, int ProductType = internal::product_type<Lhs,Rhs>::value>
struct Eigen::internal::generic_product_impl< Lhs, Rhs, LhsShape, RhsShape, ProductType >

◆ Eigen::internal::generic_xpr_base

struct Eigen::internal::generic_xpr_base
template<typename Derived, typename XprKind = typename traits<Derived>::XprKind, typename StorageKind = typename traits<Derived>::StorageKind>
struct Eigen::internal::generic_xpr_base< Derived, XprKind, StorageKind >

◆ Eigen::internal::generic_xpr_base< Derived, MatrixXpr, SolverStorage >

struct Eigen::internal::generic_xpr_base< Derived, MatrixXpr, SolverStorage >
template<typename Derived>
struct Eigen::internal::generic_xpr_base< Derived, MatrixXpr, SolverStorage >
Class Members
typedef SolverBase< Derived > type

◆ Eigen::internal::generic_xpr_base< Derived, MatrixXpr, Sparse >

struct Eigen::internal::generic_xpr_base< Derived, MatrixXpr, Sparse >
template<typename Derived>
struct Eigen::internal::generic_xpr_base< Derived, MatrixXpr, Sparse >
Class Members
typedef SparseMatrixBase< Derived > type

◆ Eigen::internal::generic_xpr_base< Derived, XprKind, Dense >

struct Eigen::internal::generic_xpr_base< Derived, XprKind, Dense >
template<typename Derived, typename XprKind>
struct Eigen::internal::generic_xpr_base< Derived, XprKind, Dense >
Class Members
typedef type type

◆ Eigen::internal::global_math_functions_filtering_base

struct Eigen::internal::global_math_functions_filtering_base
template<typename T, typename dummy = void>
struct Eigen::internal::global_math_functions_filtering_base< T, dummy >
Class Members
typedef T type

◆ Eigen::internal::global_math_functions_filtering_base< T, typename always_void< typename T::Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl >::type >

struct Eigen::internal::global_math_functions_filtering_base< T, typename always_void< typename T::Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl >::type >
template<typename T>
struct Eigen::internal::global_math_functions_filtering_base< T, typename always_void< typename T::Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl >::type >
Class Members
typedef Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl type

◆ Eigen::internal::glue_shapes

struct Eigen::internal::glue_shapes
template<typename S1, typename S2>
struct Eigen::internal::glue_shapes< S1, S2 >

◆ Eigen::internal::glue_shapes< DenseShape, TriangularShape >

struct Eigen::internal::glue_shapes< DenseShape, TriangularShape >
Class Members
typedef TriangularShape type

◆ Eigen::internal::glue_shapes< SparseShape, SelfAdjointShape >

struct Eigen::internal::glue_shapes< SparseShape, SelfAdjointShape >
Class Members
typedef SparseSelfAdjointShape type

◆ Eigen::internal::glue_shapes< SparseShape, TriangularShape >

struct Eigen::internal::glue_shapes< SparseShape, TriangularShape >
Class Members
typedef SparseTriangularShape type

◆ Eigen::internal::has_none

struct Eigen::internal::has_none
Class Members
int a[1]

◆ Eigen::internal::has_std_result_type

struct Eigen::internal::has_std_result_type
Class Members
int a[2]

◆ Eigen::internal::has_tr1_result

struct Eigen::internal::has_tr1_result
Class Members
int a[3]

◆ Eigen::internal::homogeneous_left_product_impl

struct Eigen::internal::homogeneous_left_product_impl
template<typename MatrixType, typename Lhs>
struct Eigen::internal::homogeneous_left_product_impl< MatrixType, Lhs >

◆ Eigen::internal::homogeneous_right_product_impl

struct Eigen::internal::homogeneous_right_product_impl
template<typename MatrixType, typename Rhs>
struct Eigen::internal::homogeneous_right_product_impl< MatrixType, Rhs >

◆ Eigen::internal::HouseholderSequenceShape

struct Eigen::internal::HouseholderSequenceShape

◆ Eigen::internal::hypot_retval

struct Eigen::internal::hypot_retval
template<typename Scalar>
struct Eigen::internal::hypot_retval< Scalar >
Class Members
typedef Real type

◆ Eigen::internal::imag_ref_retval

struct Eigen::internal::imag_ref_retval
template<typename Scalar>
struct Eigen::internal::imag_ref_retval< Scalar >
Class Members
typedef Real & type

◆ Eigen::internal::imag_retval

struct Eigen::internal::imag_retval
template<typename Scalar>
struct Eigen::internal::imag_retval< Scalar >
Class Members
typedef Real type

◆ Eigen::internal::image_retval

struct Eigen::internal::image_retval
template<typename DecompositionType>
struct Eigen::internal::image_retval< DecompositionType >

◆ Eigen::internal::IndexBased

struct Eigen::internal::IndexBased

◆ Eigen::internal::inner_iterator_selector

class Eigen::internal::inner_iterator_selector
template<typename XprType, typename EvaluatorKind>
class Eigen::internal::inner_iterator_selector< XprType, EvaluatorKind >
+ Inheritance diagram for Eigen::internal::inner_iterator_selector< XprType, EvaluatorKind >:

◆ Eigen::internal::inplace_transpose_selector

struct Eigen::internal::inplace_transpose_selector
template<typename MatrixType, bool IsSquare = (MatrixType::RowsAtCompileTime == MatrixType::ColsAtCompileTime) && MatrixType::RowsAtCompileTime!=Dynamic, bool MatchPacketSize = (int(MatrixType::RowsAtCompileTime) == int(internal::packet_traits<typename MatrixType::Scalar>::size)) && (internal::evaluator<MatrixType>::Flags&PacketAccessBit)>
struct Eigen::internal::inplace_transpose_selector< MatrixType, IsSquare, MatchPacketSize >

◆ Eigen::internal::inverse_impl

struct Eigen::internal::inverse_impl
template<typename MatrixType>
struct Eigen::internal::inverse_impl< MatrixType >

◆ Eigen::internal::IteratorBased

struct Eigen::internal::IteratorBased

◆ Eigen::internal::kernel_retval

struct Eigen::internal::kernel_retval
template<typename DecompositionType>
struct Eigen::internal::kernel_retval< DecompositionType >

◆ Eigen::internal::lapacke_llt

struct Eigen::internal::lapacke_llt
template<typename Scalar>
struct Eigen::internal::lapacke_llt< Scalar >

◆ Eigen::internal::ldlt_inplace

struct Eigen::internal::ldlt_inplace
template<int UpLo>
struct Eigen::internal::ldlt_inplace< UpLo >

◆ Eigen::internal::LDLT_Traits

struct Eigen::internal::LDLT_Traits
template<typename MatrixType, int UpLo>
struct Eigen::internal::LDLT_Traits< MatrixType, UpLo >

◆ Eigen::internal::linspaced_op_impl

struct Eigen::internal::linspaced_op_impl
template<typename Scalar, typename Packet, bool IsInteger>
struct Eigen::internal::linspaced_op_impl< Scalar, Packet, IsInteger >
+ Inheritance diagram for Eigen::internal::linspaced_op_impl< Scalar, Packet, IsInteger >:

◆ Eigen::internal::llt_inplace

struct Eigen::internal::llt_inplace
template<typename Scalar, int UpLo>
struct Eigen::internal::llt_inplace< Scalar, UpLo >

◆ Eigen::internal::LLT_Traits

struct Eigen::internal::LLT_Traits
template<typename MatrixType, int UpLo>
struct Eigen::internal::LLT_Traits< MatrixType, UpLo >

◆ Eigen::internal::log1p_retval

struct Eigen::internal::log1p_retval
template<typename Scalar>
struct Eigen::internal::log1p_retval< Scalar >
Class Members
typedef Scalar type

◆ Eigen::internal::LU_GlobalLU_t

struct Eigen::internal::LU_GlobalLU_t
template<typename IndexVector, typename ScalarVector>
struct Eigen::internal::LU_GlobalLU_t< IndexVector, ScalarVector >
Class Members
typedef Scalar StorageIndex
Class Members
IndexVector lsub
ScalarVector lusup
Index n
Index num_expansions
Index nzlmax
Index nzlumax
Index nzumax
IndexVector supno
ScalarVector ucol
IndexVector usub
IndexVector xlsub
IndexVector xlusup
IndexVector xsup
IndexVector xusub

◆ Eigen::internal::make_unsigned

struct Eigen::internal::make_unsigned
template<typename>
struct Eigen::internal::make_unsigned< typename >

◆ Eigen::internal::make_unsigned< char >

struct Eigen::internal::make_unsigned< char >
Class Members
typedef unsigned char type

◆ Eigen::internal::make_unsigned< signed char >

struct Eigen::internal::make_unsigned< signed char >
Class Members
typedef unsigned char type

◆ Eigen::internal::make_unsigned< signed int >

struct Eigen::internal::make_unsigned< signed int >
Class Members
typedef unsigned int type

◆ Eigen::internal::make_unsigned< signed long >

struct Eigen::internal::make_unsigned< signed long >
Class Members
typedef unsigned long type

◆ Eigen::internal::make_unsigned< signed short >

struct Eigen::internal::make_unsigned< signed short >
Class Members
typedef unsigned short type

◆ Eigen::internal::make_unsigned< unsigned char >

struct Eigen::internal::make_unsigned< unsigned char >
Class Members
typedef unsigned char type

◆ Eigen::internal::make_unsigned< unsigned int >

struct Eigen::internal::make_unsigned< unsigned int >
Class Members
typedef unsigned int type

◆ Eigen::internal::make_unsigned< unsigned long >

struct Eigen::internal::make_unsigned< unsigned long >
Class Members
typedef unsigned long type

◆ Eigen::internal::make_unsigned< unsigned short >

struct Eigen::internal::make_unsigned< unsigned short >
Class Members
typedef unsigned short type

◆ Eigen::internal::matrix_type_times_scalar_type

struct Eigen::internal::matrix_type_times_scalar_type
template<typename OtherScalarType, typename MatrixType>
struct Eigen::internal::matrix_type_times_scalar_type< OtherScalarType, MatrixType >
Class Members
typedef ReturnType ResultScalar
typedef Matrix< ResultScalar, RowsAtCompileTime, ColsAtCompileTime, 0, MaxRowsAtCompileTime, MaxColsAtCompileTime > Type

◆ Eigen::internal::meta_floor_log2

struct Eigen::internal::meta_floor_log2
template<unsigned int n, int lower = 0, int upper = sizeof(unsigned int) * CHAR_BIT - 1, int selector = meta_floor_log2_selector<n, lower, upper>::value>
struct Eigen::internal::meta_floor_log2< n, lower, upper, selector >

◆ Eigen::internal::meta_floor_log2< n, lower, upper, meta_floor_log2_bogus >

struct Eigen::internal::meta_floor_log2< n, lower, upper, meta_floor_log2_bogus >
template<unsigned int n, int lower, int upper>
struct Eigen::internal::meta_floor_log2< n, lower, upper, meta_floor_log2_bogus >

◆ Eigen::internal::meta_no

struct Eigen::internal::meta_no
Class Members
char a[2]

◆ Eigen::internal::meta_yes

struct Eigen::internal::meta_yes
Class Members
char a[1]

◆ Eigen::internal::nested_eval< ReturnByValue< Derived >, n, PlainObject >

struct Eigen::internal::nested_eval< ReturnByValue< Derived >, n, PlainObject >
template<typename Derived, int n, typename PlainObject>
struct Eigen::internal::nested_eval< ReturnByValue< Derived >, n, PlainObject >
Class Members
typedef ReturnType type

◆ Eigen::internal::norm1_retval

struct Eigen::internal::norm1_retval
template<typename Scalar>
struct Eigen::internal::norm1_retval< Scalar >
Class Members
typedef Real type

◆ Eigen::internal::nullary_wrapper< Scalar, NullaryOp, false, false, false >

struct Eigen::internal::nullary_wrapper< Scalar, NullaryOp, false, false, false >
template<typename Scalar, typename NullaryOp>
struct Eigen::internal::nullary_wrapper< Scalar, NullaryOp, false, false, false >

◆ Eigen::internal::Packet

union Eigen::internal::Packet
Class Members
double d[2]
int32_t i[4]
int64_t l[2]
uint32_t ui[4]
uint64_t ul[2]
Packet2d v2d
Packet2l v2l
Packet2ul v2ul
Packet4i v4i
Packet4ui v4ui

◆ Eigen::internal::Packet2cf.__unnamed74__

union Eigen::internal::Packet2cf.__unnamed74__
Class Members
Packet1cd cd[2]
Packet4f v

◆ Eigen::internal::Packet4f

struct Eigen::internal::Packet4f
Class Members
Packet2d v4f[2]

◆ Eigen::internal::PacketBlock

struct Eigen::internal::PacketBlock
template<typename Packet, int N = unpacket_traits<Packet>::size>
struct Eigen::internal::PacketBlock< Packet, N >
+ Collaboration diagram for Eigen::internal::PacketBlock< Packet, N >:
Class Members
Packet packet[N]

◆ Eigen::internal::pardiso_traits

struct Eigen::internal::pardiso_traits
template<class Pardiso>
struct Eigen::internal::pardiso_traits< Pardiso >

◆ Eigen::internal::pardiso_traits< PardisoLDLT< _MatrixType, Options > >

struct Eigen::internal::pardiso_traits< PardisoLDLT< _MatrixType, Options > >
template<typename _MatrixType, int Options>
struct Eigen::internal::pardiso_traits< PardisoLDLT< _MatrixType, Options > >
Class Members
typedef _MatrixType MatrixType
typedef RealScalar RealScalar
typedef Scalar Scalar
typedef StorageIndex StorageIndex

◆ Eigen::internal::pardiso_traits< PardisoLLT< _MatrixType, Options > >

struct Eigen::internal::pardiso_traits< PardisoLLT< _MatrixType, Options > >
template<typename _MatrixType, int Options>
struct Eigen::internal::pardiso_traits< PardisoLLT< _MatrixType, Options > >
Class Members
typedef _MatrixType MatrixType
typedef RealScalar RealScalar
typedef Scalar Scalar
typedef StorageIndex StorageIndex

◆ Eigen::internal::pardiso_traits< PardisoLU< _MatrixType > >

struct Eigen::internal::pardiso_traits< PardisoLU< _MatrixType > >
template<typename _MatrixType>
struct Eigen::internal::pardiso_traits< PardisoLU< _MatrixType > >
Class Members
typedef _MatrixType MatrixType
typedef RealScalar RealScalar
typedef Scalar Scalar
typedef StorageIndex StorageIndex

◆ Eigen::internal::pastix_traits

struct Eigen::internal::pastix_traits
template<class Pastix>
struct Eigen::internal::pastix_traits< Pastix >

◆ Eigen::internal::pastix_traits< PastixLDLT< _MatrixType, Options > >

struct Eigen::internal::pastix_traits< PastixLDLT< _MatrixType, Options > >
template<typename _MatrixType, int Options>
struct Eigen::internal::pastix_traits< PastixLDLT< _MatrixType, Options > >
Class Members
typedef _MatrixType MatrixType
typedef RealScalar RealScalar
typedef Scalar Scalar
typedef StorageIndex StorageIndex

◆ Eigen::internal::pastix_traits< PastixLLT< _MatrixType, Options > >

struct Eigen::internal::pastix_traits< PastixLLT< _MatrixType, Options > >
template<typename _MatrixType, int Options>
struct Eigen::internal::pastix_traits< PastixLLT< _MatrixType, Options > >
Class Members
typedef _MatrixType MatrixType
typedef RealScalar RealScalar
typedef Scalar Scalar
typedef StorageIndex StorageIndex

◆ Eigen::internal::pastix_traits< PastixLU< _MatrixType > >

struct Eigen::internal::pastix_traits< PastixLU< _MatrixType > >
template<typename _MatrixType>
struct Eigen::internal::pastix_traits< PastixLU< _MatrixType > >
Class Members
typedef _MatrixType MatrixType
typedef RealScalar RealScalar
typedef Scalar Scalar
typedef StorageIndex StorageIndex

◆ Eigen::internal::perfvalues

struct Eigen::internal::perfvalues
Class Members
Index colblk
Index fillfactor
Index maxsuper
Index panel_size
Index relax
Index rowblk

◆ Eigen::internal::permutation_matrix_product

struct Eigen::internal::permutation_matrix_product
template<typename ExpressionType, int Side, bool Transposed, typename ExpressionShape>
struct Eigen::internal::permutation_matrix_product< ExpressionType, Side, Transposed, ExpressionShape >
+ Inheritance diagram for Eigen::internal::permutation_matrix_product< ExpressionType, Side, Transposed, ExpressionShape >:

◆ Eigen::internal::plain_col_type

struct Eigen::internal::plain_col_type
template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
struct Eigen::internal::plain_col_type< ExpressionType, Scalar >
+ Inheritance diagram for Eigen::internal::plain_col_type< ExpressionType, Scalar >:
Class Members
typedef Array< Scalar, RowsAtCompileTime, 1, Options &~RowMajor, MaxRowsAtCompileTime, 1 > ArrayColType
typedef Matrix< Scalar, RowsAtCompileTime, 1, Options &~RowMajor, MaxRowsAtCompileTime, 1 > MatrixColType
typedef XprKind, MatrixXpr >::value, MatrixColType, ArrayColType >::type type

◆ Eigen::internal::plain_matrix_type

struct Eigen::internal::plain_matrix_type
template<typename T, typename StorageKind = typename traits<T>::StorageKind>
struct Eigen::internal::plain_matrix_type< T, StorageKind >

◆ Eigen::internal::plain_matrix_type< T, Dense >

struct Eigen::internal::plain_matrix_type< T, Dense >
template<typename T>
struct Eigen::internal::plain_matrix_type< T, Dense >
Class Members
typedef XprKind, Flags >::type type

◆ Eigen::internal::plain_matrix_type< T, DiagonalShape >

struct Eigen::internal::plain_matrix_type< T, DiagonalShape >
template<typename T>
struct Eigen::internal::plain_matrix_type< T, DiagonalShape >
Class Members
typedef PlainObject type

◆ Eigen::internal::plain_matrix_type_dense

struct Eigen::internal::plain_matrix_type_dense
template<typename T, typename BaseClassType, int Flags>
struct Eigen::internal::plain_matrix_type_dense< T, BaseClassType, Flags >

◆ Eigen::internal::plain_matrix_type_dense< T, ArrayXpr, Flags >

struct Eigen::internal::plain_matrix_type_dense< T, ArrayXpr, Flags >
template<typename T, int Flags>
struct Eigen::internal::plain_matrix_type_dense< T, ArrayXpr, Flags >
Class Members
typedef Scalar, RowsAtCompileTime, ColsAtCompileTime, AutoAlign|(Flags &RowMajorBit ? RowMajor :ColMajor), MaxRowsAtCompileTime, MaxColsAtCompileTime > type

◆ Eigen::internal::plain_matrix_type_dense< T, MatrixXpr, Flags >

struct Eigen::internal::plain_matrix_type_dense< T, MatrixXpr, Flags >
template<typename T, int Flags>
struct Eigen::internal::plain_matrix_type_dense< T, MatrixXpr, Flags >
Class Members
typedef Scalar, RowsAtCompileTime, ColsAtCompileTime, AutoAlign|(Flags &RowMajorBit ? RowMajor :ColMajor), MaxRowsAtCompileTime, MaxColsAtCompileTime > type

◆ Eigen::internal::plain_object_eval

struct Eigen::internal::plain_object_eval
template<typename T, typename StorageKind = typename traits<T>::StorageKind>
struct Eigen::internal::plain_object_eval< T, StorageKind >

◆ Eigen::internal::plain_object_eval< T, Dense >

struct Eigen::internal::plain_object_eval< T, Dense >
template<typename T>
struct Eigen::internal::plain_object_eval< T, Dense >
Class Members
typedef XprKind, Flags >::type type

◆ Eigen::internal::plain_row_type

struct Eigen::internal::plain_row_type
template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
struct Eigen::internal::plain_row_type< ExpressionType, Scalar >
+ Inheritance diagram for Eigen::internal::plain_row_type< ExpressionType, Scalar >:
Class Members
typedef Array< Scalar, 1, ColsAtCompileTime, Options|RowMajor, 1, MaxColsAtCompileTime > ArrayRowType
typedef Matrix< Scalar, 1, ColsAtCompileTime, Options|RowMajor, 1, MaxColsAtCompileTime > MatrixRowType
typedef XprKind, MatrixXpr >::value, MatrixRowType, ArrayRowType >::type type

◆ Eigen::internal::product_evaluator

struct Eigen::internal::product_evaluator
template<typename T, int ProductTag = internal::product_type<typename T::Lhs,typename T::Rhs>::ret, typename LhsShape = typename evaluator_traits<typename T::Lhs>::Shape, typename RhsShape = typename evaluator_traits<typename T::Rhs>::Shape, typename LhsScalar = typename traits<typename T::Lhs>::Scalar, typename RhsScalar = typename traits<typename T::Rhs>::Scalar>
struct Eigen::internal::product_evaluator< T, ProductTag, LhsShape, RhsShape, LhsScalar, RhsScalar >
+ Inheritance diagram for Eigen::internal::product_evaluator< T, ProductTag, LhsShape, RhsShape, LhsScalar, RhsScalar >:

◆ Eigen::internal::product_promote_storage_type

struct Eigen::internal::product_promote_storage_type
template<typename A, typename B, int ProductTag>
struct Eigen::internal::product_promote_storage_type< A, B, ProductTag >

◆ Eigen::internal::product_promote_storage_type< A, A, ProductTag >

struct Eigen::internal::product_promote_storage_type< A, A, ProductTag >
template<typename A, int ProductTag>
struct Eigen::internal::product_promote_storage_type< A, A, ProductTag >
Class Members
typedef A ret

◆ Eigen::internal::product_promote_storage_type< A, Dense, ProductTag >

struct Eigen::internal::product_promote_storage_type< A, Dense, ProductTag >
template<typename A, int ProductTag>
struct Eigen::internal::product_promote_storage_type< A, Dense, ProductTag >
Class Members
typedef Dense ret

◆ Eigen::internal::product_promote_storage_type< A, DiagonalShape, ProductTag >

struct Eigen::internal::product_promote_storage_type< A, DiagonalShape, ProductTag >
template<typename A, int ProductTag>
struct Eigen::internal::product_promote_storage_type< A, DiagonalShape, ProductTag >
Class Members
typedef A ret

◆ Eigen::internal::product_promote_storage_type< A, PermutationStorage, ProductTag >

struct Eigen::internal::product_promote_storage_type< A, PermutationStorage, ProductTag >
template<typename A, int ProductTag>
struct Eigen::internal::product_promote_storage_type< A, PermutationStorage, ProductTag >
Class Members
typedef A ret

◆ Eigen::internal::product_promote_storage_type< Dense, B, ProductTag >

struct Eigen::internal::product_promote_storage_type< Dense, B, ProductTag >
template<typename B, int ProductTag>
struct Eigen::internal::product_promote_storage_type< Dense, B, ProductTag >
Class Members
typedef Dense ret

◆ Eigen::internal::product_promote_storage_type< Dense, Dense, ProductTag >

struct Eigen::internal::product_promote_storage_type< Dense, Dense, ProductTag >
template<int ProductTag>
struct Eigen::internal::product_promote_storage_type< Dense, Dense, ProductTag >
Class Members
typedef Dense ret

◆ Eigen::internal::product_promote_storage_type< Dense, DiagonalShape, ProductTag >

struct Eigen::internal::product_promote_storage_type< Dense, DiagonalShape, ProductTag >
template<int ProductTag>
struct Eigen::internal::product_promote_storage_type< Dense, DiagonalShape, ProductTag >
Class Members
typedef Dense ret

◆ Eigen::internal::product_promote_storage_type< Dense, PermutationStorage, ProductTag >

struct Eigen::internal::product_promote_storage_type< Dense, PermutationStorage, ProductTag >
template<int ProductTag>
struct Eigen::internal::product_promote_storage_type< Dense, PermutationStorage, ProductTag >
Class Members
typedef Dense ret

◆ Eigen::internal::product_promote_storage_type< Dense, Sparse, OuterProduct >

struct Eigen::internal::product_promote_storage_type< Dense, Sparse, OuterProduct >
Class Members
typedef Sparse ret

◆ Eigen::internal::product_promote_storage_type< DiagonalShape, B, ProductTag >

struct Eigen::internal::product_promote_storage_type< DiagonalShape, B, ProductTag >
template<typename B, int ProductTag>
struct Eigen::internal::product_promote_storage_type< DiagonalShape, B, ProductTag >
Class Members
typedef B ret

◆ Eigen::internal::product_promote_storage_type< DiagonalShape, Dense, ProductTag >

struct Eigen::internal::product_promote_storage_type< DiagonalShape, Dense, ProductTag >
template<int ProductTag>
struct Eigen::internal::product_promote_storage_type< DiagonalShape, Dense, ProductTag >
Class Members
typedef Dense ret

◆ Eigen::internal::product_promote_storage_type< PermutationStorage, B, ProductTag >

struct Eigen::internal::product_promote_storage_type< PermutationStorage, B, ProductTag >
template<typename B, int ProductTag>
struct Eigen::internal::product_promote_storage_type< PermutationStorage, B, ProductTag >
Class Members
typedef B ret

◆ Eigen::internal::product_promote_storage_type< PermutationStorage, Dense, ProductTag >

struct Eigen::internal::product_promote_storage_type< PermutationStorage, Dense, ProductTag >
template<int ProductTag>
struct Eigen::internal::product_promote_storage_type< PermutationStorage, Dense, ProductTag >
Class Members
typedef Dense ret

◆ Eigen::internal::product_promote_storage_type< PermutationStorage, Sparse, ProductTag >

struct Eigen::internal::product_promote_storage_type< PermutationStorage, Sparse, ProductTag >
template<int ProductTag>
struct Eigen::internal::product_promote_storage_type< PermutationStorage, Sparse, ProductTag >
Class Members
typedef Sparse ret

◆ Eigen::internal::product_promote_storage_type< Sparse, Dense, OuterProduct >

struct Eigen::internal::product_promote_storage_type< Sparse, Dense, OuterProduct >
Class Members
typedef Sparse ret

◆ Eigen::internal::product_promote_storage_type< Sparse, PermutationStorage, ProductTag >

struct Eigen::internal::product_promote_storage_type< Sparse, PermutationStorage, ProductTag >
template<int ProductTag>
struct Eigen::internal::product_promote_storage_type< Sparse, PermutationStorage, ProductTag >
Class Members
typedef Sparse ret

◆ Eigen::internal::product_selfadjoint_matrix

struct Eigen::internal::product_selfadjoint_matrix
template<typename Scalar, typename Index, int LhsStorageOrder, bool LhsSelfAdjoint, bool ConjugateLhs, int RhsStorageOrder, bool RhsSelfAdjoint, bool ConjugateRhs, int ResStorageOrder>
struct Eigen::internal::product_selfadjoint_matrix< Scalar, Index, LhsStorageOrder, LhsSelfAdjoint, ConjugateLhs, RhsStorageOrder, RhsSelfAdjoint, ConjugateRhs, ResStorageOrder >

◆ Eigen::internal::product_triangular_matrix_matrix

struct Eigen::internal::product_triangular_matrix_matrix
template<typename Scalar, typename Index, int Mode, bool LhsIsTriangular, int LhsStorageOrder, bool ConjugateLhs, int RhsStorageOrder, bool ConjugateRhs, int ResStorageOrder, int Version = Specialized>
struct Eigen::internal::product_triangular_matrix_matrix< Scalar, Index, Mode, LhsIsTriangular, LhsStorageOrder, ConjugateLhs, RhsStorageOrder, ConjugateRhs, ResStorageOrder, Version >
+ Inheritance diagram for Eigen::internal::product_triangular_matrix_matrix< Scalar, Index, Mode, LhsIsTriangular, LhsStorageOrder, ConjugateLhs, RhsStorageOrder, ConjugateRhs, ResStorageOrder, Version >:

◆ Eigen::internal::product_type_selector

struct Eigen::internal::product_type_selector
template<int Rows, int Cols, int Depth>
struct Eigen::internal::product_type_selector< Rows, Cols, Depth >

◆ Eigen::internal::promote_index_type

struct Eigen::internal::promote_index_type
template<typename I1, typename I2>
struct Eigen::internal::promote_index_type< I1, I2 >
Class Members
typedef type type

◆ Eigen::internal::promote_scalar_arg

struct Eigen::internal::promote_scalar_arg
template<typename ExprScalar, typename T, bool IsSupported>
struct Eigen::internal::promote_scalar_arg< ExprScalar, T, IsSupported >

◆ Eigen::internal::promote_scalar_arg< S, T, true >

struct Eigen::internal::promote_scalar_arg< S, T, true >
template<typename S, typename T>
struct Eigen::internal::promote_scalar_arg< S, T, true >
Class Members
typedef T type

◆ Eigen::internal::promote_scalar_arg_unsupported

struct Eigen::internal::promote_scalar_arg_unsupported
template<typename ExprScalar, typename T, typename PromotedType, bool ConvertibleToLiteral = internal::is_convertible<T,PromotedType>::value, bool IsSafe = NumTraits<T>::IsInteger || !NumTraits<PromotedType>::IsInteger>
struct Eigen::internal::promote_scalar_arg_unsupported< ExprScalar, T, PromotedType, ConvertibleToLiteral, IsSafe >
+ Inheritance diagram for Eigen::internal::promote_scalar_arg_unsupported< ExprScalar, T, PromotedType, ConvertibleToLiteral, IsSafe >:

◆ Eigen::internal::promote_scalar_arg_unsupported< S, T, PromotedType, ConvertibleToLiteral, false >

struct Eigen::internal::promote_scalar_arg_unsupported< S, T, PromotedType, ConvertibleToLiteral, false >
template<typename S, typename T, typename PromotedType, bool ConvertibleToLiteral>
struct Eigen::internal::promote_scalar_arg_unsupported< S, T, PromotedType, ConvertibleToLiteral, false >

◆ Eigen::internal::promote_scalar_arg_unsupported< S, T, PromotedType, true, true >

struct Eigen::internal::promote_scalar_arg_unsupported< S, T, PromotedType, true, true >
template<typename S, typename T, typename PromotedType>
struct Eigen::internal::promote_scalar_arg_unsupported< S, T, PromotedType, true, true >
Class Members
typedef PromotedType type

◆ Eigen::internal::promote_scalar_arg_unsupported< S, T, S, false, true >

struct Eigen::internal::promote_scalar_arg_unsupported< S, T, S, false, true >
template<typename S, typename T>
struct Eigen::internal::promote_scalar_arg_unsupported< S, T, S, false, true >

◆ Eigen::internal::promote_storage_type

struct Eigen::internal::promote_storage_type
template<typename A, typename B>
struct Eigen::internal::promote_storage_type< A, B >

◆ Eigen::internal::promote_storage_type< A, A >

struct Eigen::internal::promote_storage_type< A, A >
template<typename A>
struct Eigen::internal::promote_storage_type< A, A >
Class Members
typedef A ret

◆ Eigen::internal::promote_storage_type< A, const A >

struct Eigen::internal::promote_storage_type< A, const A >
template<typename A>
struct Eigen::internal::promote_storage_type< A, const A >
Class Members
typedef A ret

◆ Eigen::internal::promote_storage_type< const A, A >

struct Eigen::internal::promote_storage_type< const A, A >
template<typename A>
struct Eigen::internal::promote_storage_type< const A, A >
Class Members
typedef A ret

◆ Eigen::internal::qr_preconditioner_impl

struct Eigen::internal::qr_preconditioner_impl
template<typename MatrixType, int QRPreconditioner, int Case, bool DoAnything = qr_preconditioner_should_do_anything<MatrixType, QRPreconditioner, Case>::ret>
struct Eigen::internal::qr_preconditioner_impl< MatrixType, QRPreconditioner, Case, DoAnything >
+ Inheritance diagram for Eigen::internal::qr_preconditioner_impl< MatrixType, QRPreconditioner, Case, DoAnything >:

◆ Eigen::internal::quaternionbase_assign_impl

struct Eigen::internal::quaternionbase_assign_impl
template<typename Other, int OtherRows = Other::RowsAtCompileTime, int OtherCols = Other::ColsAtCompileTime>
struct Eigen::internal::quaternionbase_assign_impl< Other, OtherRows, OtherCols >

◆ Eigen::internal::random_default_impl

struct Eigen::internal::random_default_impl
template<typename Scalar, bool IsComplex, bool IsInteger>
struct Eigen::internal::random_default_impl< Scalar, IsComplex, IsInteger >
+ Inheritance diagram for Eigen::internal::random_default_impl< Scalar, IsComplex, IsInteger >:

◆ Eigen::internal::random_retval

struct Eigen::internal::random_retval
template<typename Scalar>
struct Eigen::internal::random_retval< Scalar >
Class Members
typedef Scalar type

◆ Eigen::internal::real_ref_retval

struct Eigen::internal::real_ref_retval
template<typename Scalar>
struct Eigen::internal::real_ref_retval< Scalar >
Class Members
typedef Real & type

◆ Eigen::internal::real_retval

struct Eigen::internal::real_retval
template<typename Scalar>
struct Eigen::internal::real_retval< Scalar >
Class Members
typedef Real type

◆ Eigen::internal::redux_impl

struct Eigen::internal::redux_impl
template<typename Func, typename Derived, int Traversal = redux_traits<Func, Derived>::Traversal, int Unrolling = redux_traits<Func, Derived>::Unrolling>
struct Eigen::internal::redux_impl< Func, Derived, Traversal, Unrolling >

◆ Eigen::internal::ref_selector

struct Eigen::internal::ref_selector
template<typename T>
struct Eigen::internal::ref_selector< T >
+ Inheritance diagram for Eigen::internal::ref_selector< T >:
Class Members
typedef Flags &NestByRefBit), T &, T >::type non_const_type
typedef Flags &NestByRefBit), T const &, const T >::type type

◆ Eigen::internal::remove_all

struct Eigen::internal::remove_all
template<typename T>
struct Eigen::internal::remove_all< T >
Class Members
typedef T type

◆ Eigen::internal::remove_all< const T >

struct Eigen::internal::remove_all< const T >
template<typename T>
struct Eigen::internal::remove_all< const T >
Class Members
typedef type type

◆ Eigen::internal::remove_all< T & >

struct Eigen::internal::remove_all< T & >
template<typename T>
struct Eigen::internal::remove_all< T & >
Class Members
typedef type type

◆ Eigen::internal::remove_all< T * >

struct Eigen::internal::remove_all< T * >
template<typename T>
struct Eigen::internal::remove_all< T * >
Class Members
typedef type type

◆ Eigen::internal::remove_all< T const & >

struct Eigen::internal::remove_all< T const & >
template<typename T>
struct Eigen::internal::remove_all< T const & >
Class Members
typedef type type

◆ Eigen::internal::remove_all< T const * >

struct Eigen::internal::remove_all< T const * >
template<typename T>
struct Eigen::internal::remove_all< T const * >
Class Members
typedef type type

◆ Eigen::internal::remove_const

struct Eigen::internal::remove_const
template<class T>
struct Eigen::internal::remove_const< T >
Class Members
typedef T type

◆ Eigen::internal::remove_const< const T >

struct Eigen::internal::remove_const< const T >
template<class T>
struct Eigen::internal::remove_const< const T >
Class Members
typedef T type

◆ Eigen::internal::remove_const< const T[]>

struct Eigen::internal::remove_const< const T[]>
template<class T>
struct Eigen::internal::remove_const< const T[]>
Class Members
typedef T type

◆ Eigen::internal::remove_const< const T[Size]>

struct Eigen::internal::remove_const< const T[Size]>
template<class T, unsigned int Size>
struct Eigen::internal::remove_const< const T[Size]>
Class Members
typedef T type

◆ Eigen::internal::remove_pointer

struct Eigen::internal::remove_pointer
template<typename T>
struct Eigen::internal::remove_pointer< T >
Class Members
typedef T type

◆ Eigen::internal::remove_pointer< T * >

struct Eigen::internal::remove_pointer< T * >
template<typename T>
struct Eigen::internal::remove_pointer< T * >
Class Members
typedef T type

◆ Eigen::internal::remove_pointer< T *const >

struct Eigen::internal::remove_pointer< T *const >
template<typename T>
struct Eigen::internal::remove_pointer< T *const >
Class Members
typedef T type

◆ Eigen::internal::remove_reference

struct Eigen::internal::remove_reference
template<typename T>
struct Eigen::internal::remove_reference< T >
Class Members
typedef T type

◆ Eigen::internal::remove_reference< T & >

struct Eigen::internal::remove_reference< T & >
template<typename T>
struct Eigen::internal::remove_reference< T & >
Class Members
typedef T type

◆ Eigen::internal::result_of

struct Eigen::internal::result_of
template<typename T>
struct Eigen::internal::result_of< T >

◆ Eigen::internal::result_of< scalar_cmp_op< LhsScalar, RhsScalar, Cmp >(LhsScalar, RhsScalar)>

struct Eigen::internal::result_of< scalar_cmp_op< LhsScalar, RhsScalar, Cmp >(LhsScalar, RhsScalar)>
template<ComparisonName Cmp, typename LhsScalar, typename RhsScalar>
struct Eigen::internal::result_of< scalar_cmp_op< LhsScalar, RhsScalar, Cmp >(LhsScalar, RhsScalar)>
Class Members
typedef bool type

◆ Eigen::internal::rotation_base_generic_product_selector

struct Eigen::internal::rotation_base_generic_product_selector
template<typename RotationDerived, typename MatrixType, bool IsVector = MatrixType::IsVectorAtCompileTime>
struct Eigen::internal::rotation_base_generic_product_selector< RotationDerived, MatrixType, IsVector >

◆ Eigen::internal::round_retval

struct Eigen::internal::round_retval
template<typename Scalar>
struct Eigen::internal::round_retval< Scalar >
Class Members
typedef Scalar type

◆ Eigen::internal::scalar_betainc_op

struct Eigen::internal::scalar_betainc_op
template<typename Scalar>
struct Eigen::internal::scalar_betainc_op< Scalar >

◆ Eigen::internal::scalar_cmp_op

struct Eigen::internal::scalar_cmp_op
template<typename LhsScalar, typename RhsScalar, ComparisonName cmp>
struct Eigen::internal::scalar_cmp_op< LhsScalar, RhsScalar, cmp >

◆ Eigen::internal::scalar_digamma_op

struct Eigen::internal::scalar_digamma_op
template<typename Scalar>
struct Eigen::internal::scalar_digamma_op< Scalar >

◆ Eigen::internal::scalar_erf_op

struct Eigen::internal::scalar_erf_op
template<typename Scalar>
struct Eigen::internal::scalar_erf_op< Scalar >

◆ Eigen::internal::scalar_erfc_op

struct Eigen::internal::scalar_erfc_op
template<typename Scalar>
struct Eigen::internal::scalar_erfc_op< Scalar >

◆ Eigen::internal::scalar_fuzzy_default_impl

struct Eigen::internal::scalar_fuzzy_default_impl
template<typename Scalar, bool IsComplex, bool IsInteger>
struct Eigen::internal::scalar_fuzzy_default_impl< Scalar, IsComplex, IsInteger >
+ Inheritance diagram for Eigen::internal::scalar_fuzzy_default_impl< Scalar, IsComplex, IsInteger >:

◆ Eigen::internal::scalar_hypot_op

struct Eigen::internal::scalar_hypot_op
template<typename LhsScalar, typename RhsScalar = LhsScalar>
struct Eigen::internal::scalar_hypot_op< LhsScalar, RhsScalar >

◆ Eigen::internal::scalar_igamma_op

struct Eigen::internal::scalar_igamma_op
template<typename Scalar>
struct Eigen::internal::scalar_igamma_op< Scalar >

◆ Eigen::internal::scalar_igammac_op

struct Eigen::internal::scalar_igammac_op
template<typename Scalar>
struct Eigen::internal::scalar_igammac_op< Scalar >

◆ Eigen::internal::scalar_lgamma_op

struct Eigen::internal::scalar_lgamma_op
template<typename Scalar>
struct Eigen::internal::scalar_lgamma_op< Scalar >

◆ Eigen::internal::scalar_sign_op

struct Eigen::internal::scalar_sign_op
template<typename Scalar, bool iscpx = (NumTraits<Scalar>::IsComplex!=0)>
struct Eigen::internal::scalar_sign_op< Scalar, iscpx >

◆ Eigen::internal::scalar_zeta_op

struct Eigen::internal::scalar_zeta_op
template<typename Scalar>
struct Eigen::internal::scalar_zeta_op< Scalar >

◆ Eigen::internal::Selector

struct Eigen::internal::Selector
template<size_t N>
struct Eigen::internal::Selector< N >
Class Members
bool select[N]

◆ Eigen::internal::selfadjoint_product_impl

struct Eigen::internal::selfadjoint_product_impl
template<typename Lhs, int LhsMode, bool LhsIsVector, typename Rhs, int RhsMode, bool RhsIsVector>
struct Eigen::internal::selfadjoint_product_impl< Lhs, LhsMode, LhsIsVector, Rhs, RhsMode, RhsIsVector >

◆ Eigen::internal::selfadjoint_rank2_update_selector

struct Eigen::internal::selfadjoint_rank2_update_selector
template<typename Scalar, typename Index, typename UType, typename VType, int UpLo>
struct Eigen::internal::selfadjoint_rank2_update_selector< Scalar, Index, UType, VType, UpLo >

◆ Eigen::internal::smart_copy_helper

struct Eigen::internal::smart_copy_helper
template<typename T, bool UseMemcpy>
struct Eigen::internal::smart_copy_helper< T, UseMemcpy >

◆ Eigen::internal::smart_memmove_helper

struct Eigen::internal::smart_memmove_helper
template<typename T, bool UseMemmove>
struct Eigen::internal::smart_memmove_helper< T, UseMemmove >

◆ Eigen::internal::solve_traits

struct Eigen::internal::solve_traits
template<typename Decomposition, typename RhsType, typename StorageKind>
struct Eigen::internal::solve_traits< Decomposition, RhsType, StorageKind >

◆ Eigen::internal::solve_traits< Decomposition, RhsType, Dense >

struct Eigen::internal::solve_traits< Decomposition, RhsType, Dense >
template<typename Decomposition, typename RhsType>
struct Eigen::internal::solve_traits< Decomposition, RhsType, Dense >
Class Members
typedef type PlainObject

◆ Eigen::internal::solve_traits< Decomposition, RhsType, Sparse >

struct Eigen::internal::solve_traits< Decomposition, RhsType, Sparse >
template<typename Decomposition, typename RhsType>
struct Eigen::internal::solve_traits< Decomposition, RhsType, Sparse >
Class Members
typedef Flags >::type PlainObject

◆ Eigen::internal::Sparse2Dense

struct Eigen::internal::Sparse2Dense

◆ Eigen::internal::Sparse2Sparse

struct Eigen::internal::Sparse2Sparse

◆ Eigen::internal::sparse_conjunction_evaluator

struct Eigen::internal::sparse_conjunction_evaluator
template<typename T, typename LhsKind = typename evaluator_traits<typename T::Lhs>::Kind, typename RhsKind = typename evaluator_traits<typename T::Rhs>::Kind, typename LhsScalar = typename traits<typename T::Lhs>::Scalar, typename RhsScalar = typename traits<typename T::Rhs>::Scalar>
struct Eigen::internal::sparse_conjunction_evaluator< T, LhsKind, RhsKind, LhsScalar, RhsScalar >
+ Inheritance diagram for Eigen::internal::sparse_conjunction_evaluator< T, LhsKind, RhsKind, LhsScalar, RhsScalar >:

◆ Eigen::internal::sparse_diagonal_product_evaluator

struct Eigen::internal::sparse_diagonal_product_evaluator
template<typename SparseXprType, typename DiagonalCoeffType, int SDP_Tag>
struct Eigen::internal::sparse_diagonal_product_evaluator< SparseXprType, DiagonalCoeffType, SDP_Tag >
+ Inheritance diagram for Eigen::internal::sparse_diagonal_product_evaluator< SparseXprType, DiagonalCoeffType, SDP_Tag >:

◆ Eigen::internal::sparse_eval< T, 1, 1, Flags >

struct Eigen::internal::sparse_eval< T, 1, 1, Flags >
template<typename T, int Flags>
struct Eigen::internal::sparse_eval< T, 1, 1, Flags >
Class Members
typedef typename traits< T >::Scalar _Scalar
typedef Matrix< _Scalar, 1, 1 > type

◆ Eigen::internal::sparse_eval< T, 1, Cols, Flags >

struct Eigen::internal::sparse_eval< T, 1, Cols, Flags >
template<typename T, int Cols, int Flags>
struct Eigen::internal::sparse_eval< T, 1, Cols, Flags >
Class Members
typedef typename traits< T >::Scalar _Scalar
typedef typename traits< T >::StorageIndex _StorageIndex
typedef SparseVector< _Scalar, RowMajor, _StorageIndex > type

◆ Eigen::internal::sparse_eval< T, Rows, 1, Flags >

struct Eigen::internal::sparse_eval< T, Rows, 1, Flags >
template<typename T, int Rows, int Flags>
struct Eigen::internal::sparse_eval< T, Rows, 1, Flags >
Class Members
typedef typename traits< T >::Scalar _Scalar
typedef typename traits< T >::StorageIndex _StorageIndex
typedef SparseVector< _Scalar, ColMajor, _StorageIndex > type

◆ Eigen::internal::sparse_solve_triangular_selector

struct Eigen::internal::sparse_solve_triangular_selector
template<typename Lhs, typename Rhs, int Mode, int UpLo = (Mode & Lower) ? Lower : (Mode & Upper) ? Upper : -1, int StorageOrder = int(traits<Lhs>::Flags) & RowMajorBit>
struct Eigen::internal::sparse_solve_triangular_selector< Lhs, Rhs, Mode, UpLo, StorageOrder >

◆ Eigen::internal::sparse_solve_triangular_sparse_selector

struct Eigen::internal::sparse_solve_triangular_sparse_selector
template<typename Lhs, typename Rhs, int Mode, int UpLo = (Mode & Lower) ? Lower : (Mode & Upper) ? Upper : -1, int StorageOrder = int(Lhs::Flags) & (RowMajorBit)>
struct Eigen::internal::sparse_solve_triangular_sparse_selector< Lhs, Rhs, Mode, UpLo, StorageOrder >

◆ Eigen::internal::sparse_sparse_product_with_pruning_selector

struct Eigen::internal::sparse_sparse_product_with_pruning_selector
template<typename Lhs, typename Rhs, typename ResultType, int LhsStorageOrder = traits<Lhs>::Flags&RowMajorBit, int RhsStorageOrder = traits<Rhs>::Flags&RowMajorBit, int ResStorageOrder = traits<ResultType>::Flags&RowMajorBit>
struct Eigen::internal::sparse_sparse_product_with_pruning_selector< Lhs, Rhs, ResultType, LhsStorageOrder, RhsStorageOrder, ResStorageOrder >

◆ Eigen::internal::sparse_sparse_to_dense_product_selector

struct Eigen::internal::sparse_sparse_to_dense_product_selector
template<typename Lhs, typename Rhs, typename ResultType, int LhsStorageOrder = (traits<Lhs>::Flags&RowMajorBit) ? RowMajor : ColMajor, int RhsStorageOrder = (traits<Rhs>::Flags&RowMajorBit) ? RowMajor : ColMajor>
struct Eigen::internal::sparse_sparse_to_dense_product_selector< Lhs, Rhs, ResultType, LhsStorageOrder, RhsStorageOrder >

◆ Eigen::internal::sparse_time_dense_product_impl

struct Eigen::internal::sparse_time_dense_product_impl
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType, int LhsStorageOrder = ((SparseLhsType::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor, bool ColPerCol = ((DenseRhsType::Flags&RowMajorBit)==0) || DenseRhsType::ColsAtCompileTime==1>
struct Eigen::internal::sparse_time_dense_product_impl< SparseLhsType, DenseRhsType, DenseResType, AlphaType, LhsStorageOrder, ColPerCol >

◆ Eigen::internal::sparse_vector_assign_selector

struct Eigen::internal::sparse_vector_assign_selector
template<typename Dest, typename Src, int AssignmentKind = !bool(Src::IsVectorAtCompileTime) ? SVA_RuntimeSwitch : Src::InnerSizeAtCompileTime==1 ? SVA_Outer : SVA_Inner>
struct Eigen::internal::sparse_vector_assign_selector< Dest, Src, AssignmentKind >

◆ Eigen::internal::SparseSelfAdjoint2Sparse

struct Eigen::internal::SparseSelfAdjoint2Sparse

◆ Eigen::internal::static_assertion

struct Eigen::internal::static_assertion
template<bool condition>
struct Eigen::internal::static_assertion< condition >

◆ Eigen::internal::stem_function

struct Eigen::internal::stem_function
template<typename Scalar>
struct Eigen::internal::stem_function< Scalar >
Class Members
typedef std::complex< typename NumTraits< Scalar >::Real > ComplexScalar
typedef ComplexScalar type

◆ Eigen::internal::storage_kind_to_evaluator_kind

struct Eigen::internal::storage_kind_to_evaluator_kind
template<typename StorageKind>
struct Eigen::internal::storage_kind_to_evaluator_kind< StorageKind >
Class Members
typedef IndexBased Kind

◆ Eigen::internal::storage_kind_to_evaluator_kind< Sparse >

struct Eigen::internal::storage_kind_to_evaluator_kind< Sparse >
Class Members
typedef IteratorBased Kind

◆ Eigen::internal::storage_kind_to_shape

struct Eigen::internal::storage_kind_to_shape
template<typename StorageKind>
struct Eigen::internal::storage_kind_to_shape< StorageKind >

◆ Eigen::internal::storage_kind_to_shape< Dense >

struct Eigen::internal::storage_kind_to_shape< Dense >
Class Members
typedef DenseShape Shape

◆ Eigen::internal::storage_kind_to_shape< DiagonalShape >

struct Eigen::internal::storage_kind_to_shape< DiagonalShape >
Class Members
typedef DiagonalShape Shape

◆ Eigen::internal::storage_kind_to_shape< PermutationStorage >

struct Eigen::internal::storage_kind_to_shape< PermutationStorage >
Class Members
typedef PermutationShape Shape

◆ Eigen::internal::storage_kind_to_shape< SolverStorage >

struct Eigen::internal::storage_kind_to_shape< SolverStorage >
Class Members
typedef SolverShape Shape

◆ Eigen::internal::storage_kind_to_shape< Sparse >

struct Eigen::internal::storage_kind_to_shape< Sparse >
Class Members
typedef SparseShape Shape

◆ Eigen::internal::storage_kind_to_shape< TranspositionsStorage >

struct Eigen::internal::storage_kind_to_shape< TranspositionsStorage >
Class Members
typedef TranspositionsShape Shape

◆ Eigen::internal::svd_precondition_2x2_block_to_be_real

struct Eigen::internal::svd_precondition_2x2_block_to_be_real
template<typename MatrixType, int QRPreconditioner, bool IsComplex = NumTraits<typename MatrixType::Scalar>::IsComplex>
struct Eigen::internal::svd_precondition_2x2_block_to_be_real< MatrixType, QRPreconditioner, IsComplex >

◆ Eigen::internal::ternary_evaluator

struct Eigen::internal::ternary_evaluator
template<typename T, typename Arg1Kind = typename evaluator_traits<typename T::Arg1>::Kind, typename Arg2Kind = typename evaluator_traits<typename T::Arg2>::Kind, typename Arg3Kind = typename evaluator_traits<typename T::Arg3>::Kind, typename Arg1Scalar = typename traits<typename T::Arg1>::Scalar, typename Arg2Scalar = typename traits<typename T::Arg2>::Scalar, typename Arg3Scalar = typename traits<typename T::Arg3>::Scalar>
struct Eigen::internal::ternary_evaluator< T, Arg1Kind, Arg2Kind, Arg3Kind, Arg1Scalar, Arg2Scalar, Arg3Scalar >
+ Inheritance diagram for Eigen::internal::ternary_evaluator< T, Arg1Kind, Arg2Kind, Arg3Kind, Arg1Scalar, Arg2Scalar, Arg3Scalar >:

◆ Eigen::internal::ternary_result_of_select

struct Eigen::internal::ternary_result_of_select
template<typename Func, typename ArgType0, typename ArgType1, typename ArgType2, int SizeOf = sizeof(has_none)>
struct Eigen::internal::ternary_result_of_select< Func, ArgType0, ArgType1, ArgType2, SizeOf >
Class Members
typedef remove_all< ArgType0 >::type type

◆ Eigen::internal::ternary_result_of_select< Func, ArgType0, ArgType1, ArgType2, sizeof(has_std_result_type)>

struct Eigen::internal::ternary_result_of_select< Func, ArgType0, ArgType1, ArgType2, sizeof(has_std_result_type)>
template<typename Func, typename ArgType0, typename ArgType1, typename ArgType2>
struct Eigen::internal::ternary_result_of_select< Func, ArgType0, ArgType1, ArgType2, sizeof(has_std_result_type)>
Class Members
typedef result_type type

◆ Eigen::internal::ternary_result_of_select< Func, ArgType0, ArgType1, ArgType2, sizeof(has_tr1_result)>

struct Eigen::internal::ternary_result_of_select< Func, ArgType0, ArgType1, ArgType2, sizeof(has_tr1_result)>
template<typename Func, typename ArgType0, typename ArgType1, typename ArgType2>
struct Eigen::internal::ternary_result_of_select< Func, ArgType0, ArgType1, ArgType2, sizeof(has_tr1_result)>
Class Members
typedef typename Func::template result< Func(ArgType0, ArgType1, ArgType2) >::type type

◆ Eigen::internal::traits< AngleAxis< _Scalar > >

struct Eigen::internal::traits< AngleAxis< _Scalar > >
template<typename _Scalar>
struct Eigen::internal::traits< AngleAxis< _Scalar > >
Class Members
typedef _Scalar Scalar

◆ Eigen::internal::traits< BDCSVD< _MatrixType > >

struct Eigen::internal::traits< BDCSVD< _MatrixType > >
template<typename _MatrixType>
struct Eigen::internal::traits< BDCSVD< _MatrixType > >
Class Members
typedef _MatrixType MatrixType

◆ Eigen::internal::traits< BiCGSTAB< _MatrixType, _Preconditioner > >

struct Eigen::internal::traits< BiCGSTAB< _MatrixType, _Preconditioner > >
template<typename _MatrixType, typename _Preconditioner>
struct Eigen::internal::traits< BiCGSTAB< _MatrixType, _Preconditioner > >
Class Members
typedef _MatrixType MatrixType
typedef _Preconditioner Preconditioner

◆ Eigen::internal::traits< BlockSparseMatrixView< BlockSparseMatrixT > >

struct Eigen::internal::traits< BlockSparseMatrixView< BlockSparseMatrixT > >
template<typename BlockSparseMatrixT>
struct Eigen::internal::traits< BlockSparseMatrixView< BlockSparseMatrixT > >
Class Members
typedef Ref< Matrix< typename BlockSparseMatrixT::RealScalar, BlockSize, BlockSize > > RealScalar
typedef Ref< Matrix< typename BlockSparseMatrixT::Scalar, BlockSize, BlockSize > > Scalar

◆ Eigen::internal::traits< ConjugateGradient< _MatrixType, _UpLo, _Preconditioner > >

struct Eigen::internal::traits< ConjugateGradient< _MatrixType, _UpLo, _Preconditioner > >
template<typename _MatrixType, int _UpLo, typename _Preconditioner>
struct Eigen::internal::traits< ConjugateGradient< _MatrixType, _UpLo, _Preconditioner > >
Class Members
typedef _MatrixType MatrixType
typedef _Preconditioner Preconditioner

◆ Eigen::internal::traits< FullPivHouseholderQRMatrixQReturnType< MatrixType > >

struct Eigen::internal::traits< FullPivHouseholderQRMatrixQReturnType< MatrixType > >
template<typename MatrixType>
struct Eigen::internal::traits< FullPivHouseholderQRMatrixQReturnType< MatrixType > >
Class Members
typedef typename MatrixType::PlainObject ReturnType

◆ Eigen::internal::traits< HessenbergDecompositionMatrixHReturnType< MatrixType > >

struct Eigen::internal::traits< HessenbergDecompositionMatrixHReturnType< MatrixType > >
template<typename MatrixType>
struct Eigen::internal::traits< HessenbergDecompositionMatrixHReturnType< MatrixType > >
Class Members
typedef MatrixType ReturnType

◆ Eigen::internal::traits< homogeneous_left_product_impl< Homogeneous< MatrixType, Vertical >, Lhs > >

struct Eigen::internal::traits< homogeneous_left_product_impl< Homogeneous< MatrixType, Vertical >, Lhs > >
template<typename MatrixType, typename Lhs>
struct Eigen::internal::traits< homogeneous_left_product_impl< Homogeneous< MatrixType, Vertical >, Lhs > >
Class Members
typedef take_matrix_for_product< Lhs >::type LhsMatrixType
typedef remove_all< LhsMatrixType >::type LhsMatrixTypeCleaned
typedef remove_all< MatrixType >::type MatrixTypeCleaned
typedef make_proper_matrix_type< typename traits< MatrixTypeCleaned >::Scalar, LhsMatrixTypeCleaned::RowsAtCompileTime, MatrixTypeCleaned::ColsAtCompileTime, MatrixTypeCleaned::PlainObject::Options, LhsMatrixTypeCleaned::MaxRowsAtCompileTime, MatrixTypeCleaned::MaxColsAtCompileTime >::type ReturnType

◆ Eigen::internal::traits< homogeneous_right_product_impl< Homogeneous< MatrixType, Horizontal >, Rhs > >

struct Eigen::internal::traits< homogeneous_right_product_impl< Homogeneous< MatrixType, Horizontal >, Rhs > >
template<typename MatrixType, typename Rhs>
struct Eigen::internal::traits< homogeneous_right_product_impl< Homogeneous< MatrixType, Horizontal >, Rhs > >
Class Members
typedef make_proper_matrix_type< typename traits< MatrixType >::Scalar, MatrixType::RowsAtCompileTime, Rhs::ColsAtCompileTime, MatrixType::PlainObject::Options, MatrixType::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime >::type ReturnType

◆ Eigen::internal::traits< image_retval_base< DecompositionType > >

struct Eigen::internal::traits< image_retval_base< DecompositionType > >
template<typename DecompositionType>
struct Eigen::internal::traits< image_retval_base< DecompositionType > >
Class Members
typedef typename DecompositionType::MatrixType MatrixType
typedef Matrix< typename MatrixType::Scalar, MatrixType::RowsAtCompileTime, Dynamic, MatrixType::Options, MatrixType::MaxRowsAtCompileTime, MatrixType::MaxColsAtCompileTime > ReturnType

◆ Eigen::internal::traits< JacobiSVD< _MatrixType, QRPreconditioner > >

struct Eigen::internal::traits< JacobiSVD< _MatrixType, QRPreconditioner > >
template<typename _MatrixType, int QRPreconditioner>
struct Eigen::internal::traits< JacobiSVD< _MatrixType, QRPreconditioner > >
Class Members
typedef _MatrixType MatrixType

◆ Eigen::internal::traits< kernel_retval_base< DecompositionType > >

struct Eigen::internal::traits< kernel_retval_base< DecompositionType > >
template<typename DecompositionType>
struct Eigen::internal::traits< kernel_retval_base< DecompositionType > >
Class Members
typedef typename DecompositionType::MatrixType MatrixType
typedef Matrix< typename MatrixType::Scalar, MatrixType::ColsAtCompileTime, Dynamic, MatrixType::Options, MatrixType::MaxColsAtCompileTime, MatrixType::MaxColsAtCompileTime > ReturnType

◆ Eigen::internal::traits< LeastSquaresConjugateGradient< _MatrixType, _Preconditioner > >

struct Eigen::internal::traits< LeastSquaresConjugateGradient< _MatrixType, _Preconditioner > >
template<typename _MatrixType, typename _Preconditioner>
struct Eigen::internal::traits< LeastSquaresConjugateGradient< _MatrixType, _Preconditioner > >
Class Members
typedef _MatrixType MatrixType
typedef _Preconditioner Preconditioner

◆ Eigen::internal::traits< Rotation2D< _Scalar > >

struct Eigen::internal::traits< Rotation2D< _Scalar > >
template<typename _Scalar>
struct Eigen::internal::traits< Rotation2D< _Scalar > >
Class Members
typedef _Scalar Scalar

◆ Eigen::internal::traits< SparseQR_QProduct< SparseQRType, Derived > >

struct Eigen::internal::traits< SparseQR_QProduct< SparseQRType, Derived > >
template<typename SparseQRType, typename Derived>
struct Eigen::internal::traits< SparseQR_QProduct< SparseQRType, Derived > >
Class Members
typedef typename Derived::PlainObject ReturnType

◆ Eigen::internal::traits< SparseQRMatrixQTransposeReturnType< SparseQRType > >

struct Eigen::internal::traits< SparseQRMatrixQTransposeReturnType< SparseQRType > >
template<typename SparseQRType>
struct Eigen::internal::traits< SparseQRMatrixQTransposeReturnType< SparseQRType > >
Class Members
typedef typename SparseQRType::MatrixType ReturnType

◆ Eigen::internal::traits< SPQR_QProduct< SPQRType, Derived > >

struct Eigen::internal::traits< SPQR_QProduct< SPQRType, Derived > >
template<typename SPQRType, typename Derived>
struct Eigen::internal::traits< SPQR_QProduct< SPQRType, Derived > >
Class Members
typedef typename Derived::PlainObject ReturnType

◆ Eigen::internal::traits< SPQRMatrixQReturnType< SPQRType > >

struct Eigen::internal::traits< SPQRMatrixQReturnType< SPQRType > >
template<typename SPQRType>
struct Eigen::internal::traits< SPQRMatrixQReturnType< SPQRType > >
Class Members
typedef typename SPQRType::MatrixType ReturnType

◆ Eigen::internal::traits< SPQRMatrixQTransposeReturnType< SPQRType > >

struct Eigen::internal::traits< SPQRMatrixQTransposeReturnType< SPQRType > >
template<typename SPQRType>
struct Eigen::internal::traits< SPQRMatrixQTransposeReturnType< SPQRType > >
Class Members
typedef typename SPQRType::MatrixType ReturnType

◆ Eigen::internal::traits< triangular_solve_retval< Side, TriangularType, Rhs > >

struct Eigen::internal::traits< triangular_solve_retval< Side, TriangularType, Rhs > >
template<int Side, typename TriangularType, typename Rhs>
struct Eigen::internal::traits< triangular_solve_retval< Side, TriangularType, Rhs > >
Class Members
typedef internal::plain_matrix_type_column_major< Rhs >::type ReturnType

◆ Eigen::internal::transfer_constness

struct Eigen::internal::transfer_constness
template<typename T1, typename T2>
struct Eigen::internal::transfer_constness< T1, T2 >
Class Members
typedef conditional< bool(internal::is_const< T1 >::value), typename internal::add_const_on_value_type< T2 >::type, T2 >::type type

◆ Eigen::internal::transform_construct_from_matrix

struct Eigen::internal::transform_construct_from_matrix
template<typename Other, int Mode, int Options, int Dim, int HDim, int OtherRows = Other::RowsAtCompileTime, int OtherCols = Other::ColsAtCompileTime>
struct Eigen::internal::transform_construct_from_matrix< Other, Mode, Options, Dim, HDim, OtherRows, OtherCols >

◆ Eigen::internal::transform_left_product_impl

struct Eigen::internal::transform_left_product_impl
template<typename Other, int Mode, int Options, int Dim, int HDim, int OtherRows = Other::RowsAtCompileTime, int OtherCols = Other::ColsAtCompileTime>
struct Eigen::internal::transform_left_product_impl< Other, Mode, Options, Dim, HDim, OtherRows, OtherCols >

◆ Eigen::internal::transform_right_product_impl

struct Eigen::internal::transform_right_product_impl
template<typename TransformType, typename MatrixType, int Case = transform_traits<TransformType>::IsProjective ? 0 : int(MatrixType::RowsAtCompileTime) == int(transform_traits<TransformType>::HDim) ? 1 : 2, int RhsCols = MatrixType::ColsAtCompileTime>
struct Eigen::internal::transform_right_product_impl< TransformType, MatrixType, Case, RhsCols >

◆ Eigen::internal::transform_transform_product_impl

struct Eigen::internal::transform_transform_product_impl
template<typename Lhs, typename Rhs, bool AnyProjective = transform_traits<Lhs>::IsProjective || transform_traits<Rhs>::IsProjective>
struct Eigen::internal::transform_transform_product_impl< Lhs, Rhs, AnyProjective >

◆ Eigen::internal::TransposeImpl_base

struct Eigen::internal::TransposeImpl_base
template<typename MatrixType, bool HasDirectAccess = has_direct_access<MatrixType>::ret>
struct Eigen::internal::TransposeImpl_base< MatrixType, HasDirectAccess >
Class Members
typedef dense_xpr_base< Transpose< MatrixType > >::type type

◆ Eigen::internal::TransposeImpl_base< MatrixType, false >

struct Eigen::internal::TransposeImpl_base< MatrixType, false >
template<typename MatrixType>
struct Eigen::internal::TransposeImpl_base< MatrixType, false >
Class Members
typedef dense_xpr_base< Transpose< MatrixType > >::type type

◆ Eigen::internal::Triangular2Dense

struct Eigen::internal::Triangular2Dense

◆ Eigen::internal::Triangular2Triangular

struct Eigen::internal::Triangular2Triangular

◆ Eigen::internal::triangular_matrix_vector_product

struct Eigen::internal::triangular_matrix_vector_product
template<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs, int StorageOrder, int Version = Specialized>
struct Eigen::internal::triangular_matrix_vector_product< Index, Mode, LhsScalar, ConjLhs, RhsScalar, ConjRhs, StorageOrder, Version >
+ Inheritance diagram for Eigen::internal::triangular_matrix_vector_product< Index, Mode, LhsScalar, ConjLhs, RhsScalar, ConjRhs, StorageOrder, Version >:

◆ Eigen::internal::triangular_product_impl

struct Eigen::internal::triangular_product_impl
template<int Mode, bool LhsIsTriangular, typename Lhs, bool LhsIsVector, typename Rhs, bool RhsIsVector>
struct Eigen::internal::triangular_product_impl< Mode, LhsIsTriangular, Lhs, LhsIsVector, Rhs, RhsIsVector >

◆ Eigen::internal::triangular_solve_matrix

struct Eigen::internal::triangular_solve_matrix
template<typename Scalar, typename Index, int Side, int Mode, bool Conjugate, int TriStorageOrder, int OtherStorageOrder>
struct Eigen::internal::triangular_solve_matrix< Scalar, Index, Side, Mode, Conjugate, TriStorageOrder, OtherStorageOrder >

◆ Eigen::internal::triangular_solve_vector

struct Eigen::internal::triangular_solve_vector
template<typename LhsScalar, typename RhsScalar, typename Index, int Side, int Mode, bool Conjugate, int StorageOrder>
struct Eigen::internal::triangular_solve_vector< LhsScalar, RhsScalar, Index, Side, Mode, Conjugate, StorageOrder >

◆ Eigen::internal::triangular_solver_selector

struct Eigen::internal::triangular_solver_selector
template<typename Lhs, typename Rhs, int Side, int Mode, int Unrolling = trsolve_traits<Lhs,Rhs,Side>::Unrolling, int RhsVectors = trsolve_traits<Lhs,Rhs,Side>::RhsVectors>
struct Eigen::internal::triangular_solver_selector< Lhs, Rhs, Side, Mode, Unrolling, RhsVectors >

◆ Eigen::internal::triangular_solver_unroller

struct Eigen::internal::triangular_solver_unroller
template<typename Lhs, typename Rhs, int Mode, int LoopIndex, int Size, bool Stop = LoopIndex==Size>
struct Eigen::internal::triangular_solver_unroller< Lhs, Rhs, Mode, LoopIndex, Size, Stop >

◆ Eigen::internal::trmv_selector

struct Eigen::internal::trmv_selector
template<int Mode, int StorageOrder>
struct Eigen::internal::trmv_selector< Mode, StorageOrder >

◆ Eigen::internal::unary_result_of_select

struct Eigen::internal::unary_result_of_select
template<typename Func, typename ArgType, int SizeOf = sizeof(has_none)>
struct Eigen::internal::unary_result_of_select< Func, ArgType, SizeOf >
Class Members
typedef remove_all< ArgType >::type type

◆ Eigen::internal::unary_result_of_select< Func, ArgType, sizeof(has_std_result_type)>

struct Eigen::internal::unary_result_of_select< Func, ArgType, sizeof(has_std_result_type)>
template<typename Func, typename ArgType>
struct Eigen::internal::unary_result_of_select< Func, ArgType, sizeof(has_std_result_type)>
Class Members
typedef result_type type

◆ Eigen::internal::unary_result_of_select< Func, ArgType, sizeof(has_tr1_result)>

struct Eigen::internal::unary_result_of_select< Func, ArgType, sizeof(has_tr1_result)>
template<typename Func, typename ArgType>
struct Eigen::internal::unary_result_of_select< Func, ArgType, sizeof(has_tr1_result)>
Class Members
typedef typename Func::template result< Func(ArgType) >::type type

◆ Eigen::internal::unpacket_traits< DoublePacket< Packet > >

struct Eigen::internal::unpacket_traits< DoublePacket< Packet > >
template<typename Packet>
struct Eigen::internal::unpacket_traits< DoublePacket< Packet > >
Class Members
typedef DoublePacket< Packet > half

◆ Eigen::internal::vectorwise_reverse_inplace_impl

struct Eigen::internal::vectorwise_reverse_inplace_impl
template<int Direction>
struct Eigen::internal::vectorwise_reverse_inplace_impl< Direction >

Typedef Documentation

◆ IntPtr

typedef std::ptrdiff_t Eigen::internal::IntPtr

◆ Packet16f

◆ Packet16i

typedef __m512i Eigen::internal::Packet16i

◆ Packet16uc

typedef __vector unsigned char Eigen::internal::Packet16uc

◆ Packet2d

typedef __vector double Eigen::internal::Packet2d

◆ Packet2f

typedef float32x2_t Eigen::internal::Packet2f

◆ Packet2i

typedef int32x2_t Eigen::internal::Packet2i

◆ Packet2l

◆ Packet2ul

typedef __vector unsigned long long Eigen::internal::Packet2ul

◆ Packet4bi

typedef __vector __bool int Eigen::internal::Packet4bi

◆ Packet4d

typedef __m256d Eigen::internal::Packet4d

◆ Packet4f

typedef __m128 Eigen::internal::Packet4f

◆ Packet4i

typedef __vector int Eigen::internal::Packet4i

◆ Packet4ui

typedef __vector unsigned int Eigen::internal::Packet4ui

◆ Packet8d

typedef __m512d Eigen::internal::Packet8d

◆ Packet8f

typedef __m256 Eigen::internal::Packet8f

◆ Packet8i

typedef __vector short int Eigen::internal::Packet8i

◆ SsePrefetchPtrType

◆ UIntPtr

typedef std::size_t Eigen::internal::UIntPtr

Enumeration Type Documentation

◆ anonymous enum

anonymous enum
Enumerator
meta_floor_log2_terminate 
meta_floor_log2_move_up 
meta_floor_log2_move_down 
meta_floor_log2_bogus 
568 {
573};
@ meta_floor_log2_terminate
Definition MathFunctions.h:569
@ meta_floor_log2_move_up
Definition MathFunctions.h:570
@ meta_floor_log2_move_down
Definition MathFunctions.h:571
@ meta_floor_log2_bogus
Definition MathFunctions.h:572

◆ anonymous enum

anonymous enum
Enumerator
SDP_AsScalarProduct 
SDP_AsCwiseProduct 
29 {
32};
@ SDP_AsScalarProduct
Definition SparseDiagonalProduct.h:30
@ SDP_AsCwiseProduct
Definition SparseDiagonalProduct.h:31

◆ anonymous enum

anonymous enum
Enumerator
SVA_RuntimeSwitch 
SVA_Inner 
SVA_Outer 
49 {
53};
@ SVA_Inner
Definition SparseVector.h:51
@ SVA_Outer
Definition SparseVector.h:52
@ SVA_RuntimeSwitch
Definition SparseVector.h:50

◆ anonymous enum

anonymous enum
Enumerator
LUNoMarker 
37{ LUNoMarker = 3 };
@ LUNoMarker
Definition SparseLU_Memory.h:37

◆ anonymous enum

anonymous enum
Enumerator
emptyIdxLU 
38{emptyIdxLU = -1};
@ emptyIdxLU
Definition SparseLU_Memory.h:38

◆ anonymous enum

anonymous enum
Enumerator
PreconditionIfMoreColsThanRows 
PreconditionIfMoreRowsThanCols 
@ PreconditionIfMoreColsThanRows
Definition JacobiSVD.h:30
@ PreconditionIfMoreRowsThanCols
Definition JacobiSVD.h:30

◆ ComparisonName

Enumerator
cmp_EQ 
cmp_LT 
cmp_LE 
cmp_UNORD 
cmp_NEQ 
cmp_GT 
cmp_GE 
534 {
535 cmp_EQ = 0,
536 cmp_LT = 1,
537 cmp_LE = 2,
538 cmp_UNORD = 3,
539 cmp_NEQ = 4,
540 cmp_GT = 5,
541 cmp_GE = 6
542};
@ cmp_NEQ
Definition Constants.h:539
@ cmp_EQ
Definition Constants.h:535
@ cmp_GT
Definition Constants.h:540
@ cmp_LT
Definition Constants.h:536
@ cmp_GE
Definition Constants.h:541
@ cmp_LE
Definition Constants.h:537
@ cmp_UNORD
Definition Constants.h:538

◆ MemType

Enumerator
LUSUP 
UCOL 
LSUB 
USUB 
LLVL 
ULVL 
74{LUSUP, UCOL, LSUB, USUB, LLVL, ULVL} MemType;
@ LUSUP
Definition SparseLU_Structs.h:74
@ LSUB
Definition SparseLU_Structs.h:74
@ ULVL
Definition SparseLU_Structs.h:74
@ USUB
Definition SparseLU_Structs.h:74
@ LLVL
Definition SparseLU_Structs.h:74
@ UCOL
Definition SparseLU_Structs.h:74

◆ PermPermProduct_t

Enumerator
PermPermProduct 
@ PermPermProduct
Definition PermutationMatrix.h:18

◆ SignMatrix

Enumerator
PositiveSemiDef 
NegativeSemiDef 
ZeroSign 
Indefinite 
@ PositiveSemiDef
Definition LDLT.h:22
@ ZeroSign
Definition LDLT.h:22
@ NegativeSemiDef
Definition LDLT.h:22
@ Indefinite
Definition LDLT.h:22

Function Documentation

◆ _EIGEN_DECLARE_CONST_FAST_Packet2d()

static Eigen::internal::_EIGEN_DECLARE_CONST_FAST_Packet2d ( ZERO  ,
 
)
static

◆ _EIGEN_DECLARE_CONST_FAST_Packet2l() [1/2]

static Eigen::internal::_EIGEN_DECLARE_CONST_FAST_Packet2l ( ONE  ,
 
)
static

◆ _EIGEN_DECLARE_CONST_FAST_Packet2l() [2/2]

static Eigen::internal::_EIGEN_DECLARE_CONST_FAST_Packet2l ( ZERO  ,
 
)
static

◆ _EIGEN_DECLARE_CONST_FAST_Packet4f()

static Eigen::internal::_EIGEN_DECLARE_CONST_FAST_Packet4f ( ZERO  ,
 
)
static

◆ _EIGEN_DECLARE_CONST_FAST_Packet4i() [1/5]

static Eigen::internal::_EIGEN_DECLARE_CONST_FAST_Packet4i ( MINUS1  ,
1 
)
static

◆ _EIGEN_DECLARE_CONST_FAST_Packet4i() [2/5]

static Eigen::internal::_EIGEN_DECLARE_CONST_FAST_Packet4i ( MINUS16  ,
16 
)
static

◆ _EIGEN_DECLARE_CONST_FAST_Packet4i() [3/5]

static Eigen::internal::_EIGEN_DECLARE_CONST_FAST_Packet4i ( ONE  ,
 
)
static

◆ _EIGEN_DECLARE_CONST_FAST_Packet4i() [4/5]

static Eigen::internal::_EIGEN_DECLARE_CONST_FAST_Packet4i ( ONE  ,
 
)
static

◆ _EIGEN_DECLARE_CONST_FAST_Packet4i() [5/5]

static Eigen::internal::_EIGEN_DECLARE_CONST_FAST_Packet4i ( ZERO  ,
 
)
static

◆ _EIGEN_DECLARE_CONST_Packet2d() [1/15]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet2d ( ,
1.  0 
)
static

◆ _EIGEN_DECLARE_CONST_Packet2d() [2/15]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet2d ( ,
2.  0 
)
static

◆ _EIGEN_DECLARE_CONST_Packet2d() [3/15]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet2d ( cephes_exp_C1  ,
0.  693145751953125 
)
static

◆ _EIGEN_DECLARE_CONST_Packet2d() [4/15]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet2d ( cephes_exp_C2  ,
1.42860682030941723212e-  6 
)
static

◆ _EIGEN_DECLARE_CONST_Packet2d() [5/15]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet2d ( cephes_exp_p0  ,
1.26177193074810590878e-  4 
)
static

◆ _EIGEN_DECLARE_CONST_Packet2d() [6/15]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet2d ( cephes_exp_p1  ,
3.02994407707441961300e-  2 
)
static

◆ _EIGEN_DECLARE_CONST_Packet2d() [7/15]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet2d ( cephes_exp_p2  ,
9.99999999999999999910e-  1 
)
static

◆ _EIGEN_DECLARE_CONST_Packet2d() [8/15]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet2d ( cephes_exp_q0  ,
3.00198505138664455042e-  6 
)
static

◆ _EIGEN_DECLARE_CONST_Packet2d() [9/15]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet2d ( cephes_exp_q1  ,
2.52448340349684104192e-  3 
)
static

◆ _EIGEN_DECLARE_CONST_Packet2d() [10/15]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet2d ( cephes_exp_q2  ,
2.27265548208155028766e-  1 
)
static

◆ _EIGEN_DECLARE_CONST_Packet2d() [11/15]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet2d ( cephes_exp_q3  ,
2.  00000000000000000009e0 
)
static

◆ _EIGEN_DECLARE_CONST_Packet2d() [12/15]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet2d ( cephes_LOG2EF  ,
1.  4426950408889634073599 
)
static

◆ _EIGEN_DECLARE_CONST_Packet2d() [13/15]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet2d ( exp_hi  ,
709.  437 
)
static

◆ _EIGEN_DECLARE_CONST_Packet2d() [14/15]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet2d ( exp_lo  ,
-709.  436139303 
)
static

◆ _EIGEN_DECLARE_CONST_Packet2d() [15/15]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet2d ( half  ,
0.  5 
)
static

◆ _EIGEN_DECLARE_CONST_Packet4f() [1/25]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet4f ( ,
1.  0f 
)
static

◆ _EIGEN_DECLARE_CONST_Packet4f() [2/25]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet4f ( cephes_exp_C1  ,
0.  693359375f 
)
static

◆ _EIGEN_DECLARE_CONST_Packet4f() [3/25]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet4f ( cephes_exp_C2  ,
-2.12194440e-  4f 
)
static

◆ _EIGEN_DECLARE_CONST_Packet4f() [4/25]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet4f ( cephes_exp_p0  ,
1.9875691500E-  4f 
)
static

◆ _EIGEN_DECLARE_CONST_Packet4f() [5/25]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet4f ( cephes_exp_p1  ,
1.3981999507E-  3f 
)
static

◆ _EIGEN_DECLARE_CONST_Packet4f() [6/25]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet4f ( cephes_exp_p2  ,
8.3334519073E-  3f 
)
static

◆ _EIGEN_DECLARE_CONST_Packet4f() [7/25]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet4f ( cephes_exp_p3  ,
4.1665795894E-  2f 
)
static

◆ _EIGEN_DECLARE_CONST_Packet4f() [8/25]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet4f ( cephes_exp_p4  ,
1.6666665459E-  1f 
)
static

◆ _EIGEN_DECLARE_CONST_Packet4f() [9/25]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet4f ( cephes_exp_p5  ,
5.0000001201E-  1f 
)
static

◆ _EIGEN_DECLARE_CONST_Packet4f() [10/25]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet4f ( cephes_LOG2EF  ,
1.  44269504088896341f 
)
static

◆ _EIGEN_DECLARE_CONST_Packet4f() [11/25]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet4f ( cephes_log_p0  ,
7.0376836292E-  2f 
)
static

◆ _EIGEN_DECLARE_CONST_Packet4f() [12/25]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet4f ( cephes_log_p1  ,
- 1.1514610310E-  1f 
)
static

◆ _EIGEN_DECLARE_CONST_Packet4f() [13/25]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet4f ( cephes_log_p2  ,
1.1676998740E-  1f 
)
static

◆ _EIGEN_DECLARE_CONST_Packet4f() [14/25]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet4f ( cephes_log_p3  ,
- 1.2420140846E-  1f 
)
static

◆ _EIGEN_DECLARE_CONST_Packet4f() [15/25]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet4f ( cephes_log_p4  ,
+1.4249322787E-  1f 
)
static

◆ _EIGEN_DECLARE_CONST_Packet4f() [16/25]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet4f ( cephes_log_p5  ,
- 1.6668057665E-  1f 
)
static

◆ _EIGEN_DECLARE_CONST_Packet4f() [17/25]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet4f ( cephes_log_p6  ,
+2.0000714765E-  1f 
)
static

◆ _EIGEN_DECLARE_CONST_Packet4f() [18/25]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet4f ( cephes_log_p7  ,
- 2.4999993993E-  1f 
)
static

◆ _EIGEN_DECLARE_CONST_Packet4f() [19/25]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet4f ( cephes_log_p8  ,
+3.3333331174E-  1f 
)
static

◆ _EIGEN_DECLARE_CONST_Packet4f() [20/25]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet4f ( cephes_log_q1  ,
-2.12194440e-  4f 
)
static

◆ _EIGEN_DECLARE_CONST_Packet4f() [21/25]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet4f ( cephes_log_q2  ,
0.  693359375f 
)
static

◆ _EIGEN_DECLARE_CONST_Packet4f() [22/25]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet4f ( cephes_SQRTHF  ,
0.707106781186547524f 
)
static

◆ _EIGEN_DECLARE_CONST_Packet4f() [23/25]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet4f ( exp_hi  ,
88.3762626647950f 
)
static

◆ _EIGEN_DECLARE_CONST_Packet4f() [24/25]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet4f ( exp_lo  ,
-88.3762626647949f 
)
static

◆ _EIGEN_DECLARE_CONST_Packet4f() [25/25]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet4f ( half  ,
0.5f 
)
static

◆ _EIGEN_DECLARE_CONST_Packet4f_FROM_INT() [1/4]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet4f_FROM_INT ( inv_mant_mask  ,
~0x7f800000 
)
static

◆ _EIGEN_DECLARE_CONST_Packet4f_FROM_INT() [2/4]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet4f_FROM_INT ( min_norm_pos  ,
0x00800000   
)
static

◆ _EIGEN_DECLARE_CONST_Packet4f_FROM_INT() [3/4]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet4f_FROM_INT ( minus_inf  ,
0xff800000   
)
static

◆ _EIGEN_DECLARE_CONST_Packet4f_FROM_INT() [4/4]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet4f_FROM_INT ( minus_nan  ,
0xffffffff   
)
static

◆ _EIGEN_DECLARE_CONST_Packet4i() [1/2]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet4i ( 0x7f  ,
0x7f   
)
static

◆ _EIGEN_DECLARE_CONST_Packet4i() [2/2]

static Eigen::internal::_EIGEN_DECLARE_CONST_Packet4i ( 23  ,
23   
)
static

◆ aligned_delete()

template<typename T >
EIGEN_DEVICE_FUNC void Eigen::internal::aligned_delete ( T *  ptr,
std::size_t  size 
)
inline
332{
333 destruct_elements_of_array<T>(ptr, size);
334 aligned_free(ptr);
335}
EIGEN_DEVICE_FUNC void aligned_free(void *ptr)
Definition Memory.h:174

References aligned_free().

Referenced by Eigen::internal::gemm_blocking_space< StorageOrder, _LhsScalar, _RhsScalar, MaxRows, MaxCols, MaxDepth, KcFactor, false >::~gemm_blocking_space().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ aligned_free()

EIGEN_DEVICE_FUNC void Eigen::internal::aligned_free ( void *  ptr)
inline
175{
176 #if (EIGEN_DEFAULT_ALIGN_BYTES==0) || EIGEN_MALLOC_ALREADY_ALIGNED
177 std::free(ptr);
178 #else
180 #endif
181}
void handmade_aligned_free(void *ptr)
Definition Memory.h:96

References handmade_aligned_free().

Referenced by Eigen::internal::aligned_stack_memory_handler< T >::~aligned_stack_memory_handler(), aligned_delete(), aligned_new(), conditional_aligned_free(), and Eigen::aligned_allocator< T >::deallocate().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ aligned_malloc()

EIGEN_DEVICE_FUNC void * Eigen::internal::aligned_malloc ( std::size_t  size)
inline
154{
156
157 void *result;
158 #if (EIGEN_DEFAULT_ALIGN_BYTES==0) || EIGEN_MALLOC_ALREADY_ALIGNED
159 result = std::malloc(size);
160 #if EIGEN_DEFAULT_ALIGN_BYTES==16
161 eigen_assert((size<16 || (std::size_t(result)%16)==0) && "System's malloc returned an unaligned pointer. Compile with EIGEN_MALLOC_ALREADY_ALIGNED=0 to fallback to handmade alignd memory allocator.");
162 #endif
163 #else
164 result = handmade_aligned_malloc(size);
165 #endif
166
167 if(!result && size)
169
170 return result;
171}
#define eigen_assert(x)
Definition Macros.h:579
void * handmade_aligned_malloc(std::size_t size)
Definition Memory.h:86
EIGEN_DEVICE_FUNC void throw_std_bad_alloc()
Definition Memory.h:67
EIGEN_DEVICE_FUNC void check_that_malloc_is_allowed()
Definition Memory.h:146

References check_that_malloc_is_allowed(), eigen_assert, handmade_aligned_malloc(), and throw_std_bad_alloc().

Referenced by aligned_new(), Eigen::aligned_allocator< T >::allocate(), and conditional_aligned_malloc().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ aligned_new()

template<typename T >
EIGEN_DEVICE_FUNC T * Eigen::internal::aligned_new ( std::size_t  size)
inline
297{
298 check_size_for_overflow<T>(size);
299 T *result = reinterpret_cast<T*>(aligned_malloc(sizeof(T)*size));
301 {
302 return construct_elements_of_array(result, size);
303 }
304 EIGEN_CATCH(...)
305 {
306 aligned_free(result);
308 }
309 return result;
310}
#define EIGEN_CATCH(X)
Definition Macros.h:978
#define EIGEN_THROW
Definition Macros.h:975
#define EIGEN_TRY
Definition Macros.h:977
EIGEN_DEVICE_FUNC void * aligned_malloc(std::size_t size)
Definition Memory.h:153
EIGEN_DEVICE_FUNC T * construct_elements_of_array(T *ptr, std::size_t size)
Definition Memory.h:265

References aligned_free(), aligned_malloc(), construct_elements_of_array(), EIGEN_CATCH, EIGEN_THROW, and EIGEN_TRY.

+ Here is the call graph for this function:

◆ aligned_realloc()

void * Eigen::internal::aligned_realloc ( void *  ptr,
std::size_t  new_size,
std::size_t  old_size 
)
inline
189{
190 EIGEN_UNUSED_VARIABLE(old_size);
191
192 void *result;
193#if (EIGEN_DEFAULT_ALIGN_BYTES==0) || EIGEN_MALLOC_ALREADY_ALIGNED
194 result = std::realloc(ptr,new_size);
195#else
196 result = handmade_aligned_realloc(ptr,new_size,old_size);
197#endif
198
199 if (!result && new_size)
201
202 return result;
203}
#define EIGEN_UNUSED_VARIABLE(var)
Definition Macros.h:618
void * handmade_aligned_realloc(void *ptr, std::size_t size, std::size_t=0)
Definition Memory.h:106

References EIGEN_UNUSED_VARIABLE, handmade_aligned_realloc(), and throw_std_bad_alloc().

Referenced by conditional_aligned_realloc().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ amd_flip()

template<typename T >
T Eigen::internal::amd_flip ( const T &  i)
inline
38{ return -i-2; }

Referenced by amd_mark(), amd_unflip(), and minimum_degree_ordering().

+ Here is the caller graph for this function:

◆ amd_mark()

template<typename T0 , typename T1 >
void Eigen::internal::amd_mark ( const T0 *  w,
const T1 &  j 
)
inline
41{ return w[j] = amd_flip(w[j]); }
T amd_flip(const T &i)
Definition Amd.h:38

References amd_flip().

+ Here is the call graph for this function:

◆ amd_marked()

template<typename T0 , typename T1 >
bool Eigen::internal::amd_marked ( const T0 *  w,
const T1 &  j 
)
inline
40{ return w[j]<0; }

◆ amd_unflip()

template<typename T >
T Eigen::internal::amd_unflip ( const T &  i)
inline
39{ return i<0 ? amd_flip(i) : i; }

References amd_flip().

+ Here is the call graph for this function:

◆ apply_block_householder_on_the_left()

template<typename MatrixType , typename VectorsType , typename CoeffsType >
void Eigen::internal::apply_block_householder_on_the_left ( MatrixType &  mat,
const VectorsType &  vectors,
const CoeffsType &  hCoeffs,
bool  forward 
)
80{
81 enum { TFactorSize = MatrixType::ColsAtCompileTime };
82 Index nbVecs = vectors.cols();
84
85 if(forward) make_block_householder_triangular_factor(T, vectors, hCoeffs);
86 else make_block_householder_triangular_factor(T, vectors, hCoeffs.conjugate());
88
89 // A -= V T V^* A
90 Matrix<typename MatrixType::Scalar,VectorsType::ColsAtCompileTime,MatrixType::ColsAtCompileTime,
91 (VectorsType::MaxColsAtCompileTime==1 && MatrixType::MaxColsAtCompileTime!=1)?RowMajor:ColMajor,
92 VectorsType::MaxColsAtCompileTime,MatrixType::MaxColsAtCompileTime> tmp = V.adjoint() * mat;
93 // FIXME add .noalias() once the triangular product can work inplace
94 if(forward) tmp = T.template triangularView<Upper>() * tmp;
95 else tmp = T.template triangularView<Upper>().adjoint() * tmp;
96 mat.noalias() -= V * tmp;
97}
The matrix class, also used for vectors and row-vectors.
Definition Matrix.h:180
Expression of a triangular part in a matrix.
Definition TriangularMatrix.h:188
@ ColMajor
Definition Constants.h:320
EIGEN_DEFAULT_DENSE_INDEX_TYPE Index
The Index type as used for the API.
Definition Meta.h:33

References Eigen::TriangularView< _MatrixType, _Mode >::adjoint(), Eigen::ColMajor, make_block_householder_triangular_factor(), and Eigen::RowMajor.

Referenced by Eigen::internal::householder_qr_inplace_blocked< MatrixQR, HCoeffs, MatrixQRScalar, InnerStrideIsOne >::run().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ apply_rotation_in_the_plane()

template<typename VectorX , typename VectorY , typename OtherScalar >
void Eigen::internal::apply_rotation_in_the_plane ( DenseBase< VectorX > &  xpr_x,
DenseBase< VectorY > &  xpr_y,
const JacobiRotation< OtherScalar > &  j 
)

\jacobi_module Applies the clockwise 2D rotation j to the set of 2D vectors of coordinates x and y: $ \left( \begin{array}{c} x \\ y \end{array} \right) = J \left( \begin{array}{c} x \\ y \end{array} \right) $

See also
MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()
433{
434 typedef typename VectorX::Scalar Scalar;
435 const bool Vectorizable = (VectorX::Flags & VectorY::Flags & PacketAccessBit)
437
438 eigen_assert(xpr_x.size() == xpr_y.size());
439 Index size = xpr_x.size();
440 Index incrx = xpr_x.derived().innerStride();
441 Index incry = xpr_y.derived().innerStride();
442
443 Scalar* EIGEN_RESTRICT x = &xpr_x.derived().coeffRef(0);
444 Scalar* EIGEN_RESTRICT y = &xpr_y.derived().coeffRef(0);
445
446 OtherScalar c = j.c();
447 OtherScalar s = j.s();
448 if (c==OtherScalar(1) && s==OtherScalar(0))
449 return;
450
452 Scalar,OtherScalar,
453 VectorX::SizeAtCompileTime,
455 Vectorizable>::run(x,incrx,y,incry,size,c,s);
456}
#define EIGEN_RESTRICT
Definition Macros.h:796
#define EIGEN_PLAIN_ENUM_MIN(a, b)
Definition Macros.h:875
Scalar & s()
Definition Jacobi.h:47
Scalar & c()
Definition Jacobi.h:45
const unsigned int PacketAccessBit
Definition Constants.h:89
const Scalar & y
Definition MathFunctions.h:552
Definition CoreEvaluators.h:91
Definition GenericPacketMath.h:97

Referenced by Eigen::MatrixBase< Derived >::applyOnTheLeft(), Eigen::MatrixBase< Derived >::applyOnTheRight(), and llt_rank_update_lower().

+ Here is the caller graph for this function:

◆ assert_fail()

void Eigen::internal::assert_fail ( const char *  condition,
const char *  function,
const char *  file,
int  line 
)
inline
563 {
564 std::cerr << "assertion failed: " << condition << " in function " << function << " at " << file << ":" << line << std::endl;
565 abort();
566 }

◆ assign_sparse_to_sparse()

template<typename DstXprType , typename SrcXprType >
void Eigen::internal::assign_sparse_to_sparse ( DstXprType &  dst,
const SrcXprType &  src 
)
72{
73 typedef typename DstXprType::Scalar Scalar;
74 typedef internal::evaluator<DstXprType> DstEvaluatorType;
75 typedef internal::evaluator<SrcXprType> SrcEvaluatorType;
76
77 SrcEvaluatorType srcEvaluator(src);
78
79 const bool transpose = (DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit);
80 const Index outerEvaluationSize = (SrcEvaluatorType::Flags&RowMajorBit) ? src.rows() : src.cols();
81 if ((!transpose) && src.isRValue())
82 {
83 // eval without temporary
84 dst.resize(src.rows(), src.cols());
85 dst.setZero();
86 dst.reserve((std::max)(src.rows(),src.cols())*2);
87 for (Index j=0; j<outerEvaluationSize; ++j)
88 {
89 dst.startVec(j);
90 for (typename SrcEvaluatorType::InnerIterator it(srcEvaluator, j); it; ++it)
91 {
92 Scalar v = it.value();
93 dst.insertBackByOuterInner(j,it.index()) = v;
94 }
95 }
96 dst.finalize();
97 }
98 else
99 {
100 // eval through a temporary
101 eigen_assert(( ((internal::traits<DstXprType>::SupportedAccessPatterns & OuterRandomAccessPattern)==OuterRandomAccessPattern) ||
102 (!((DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit)))) &&
103 "the transpose operation is supposed to be handled in SparseMatrix::operator=");
104
105 enum { Flip = (DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit) };
106
107
108 DstXprType temp(src.rows(), src.cols());
109
110 temp.reserve((std::max)(src.rows(),src.cols())*2);
111 for (Index j=0; j<outerEvaluationSize; ++j)
112 {
113 temp.startVec(j);
114 for (typename SrcEvaluatorType::InnerIterator it(srcEvaluator, j); it; ++it)
115 {
116 Scalar v = it.value();
117 temp.insertBackByOuterInner(Flip?it.index():j,Flip?j:it.index()) = v;
118 }
119 }
120 temp.finalize();
121
122 dst = temp.markAsRValue();
123 }
124}
const unsigned int RowMajorBit
Definition Constants.h:61
typename Traits< remove_cvref_t< L > >::Scalar Scalar
Definition Line.hpp:36

References eigen_assert, Eigen::OuterRandomAccessPattern, and Eigen::RowMajorBit.

Referenced by Eigen::internal::Assignment< DstXprType, SrcXprType, Functor, Sparse2Sparse >::run().

+ Here is the caller graph for this function:

◆ asSluMatrix()

template<typename MatrixType >
SluMatrix Eigen::internal::asSluMatrix ( MatrixType &  mat)
292{
293 return SluMatrix::Map(mat);
294}

References Eigen::SluMatrix::Map().

Referenced by Eigen::SuperLUBase< _MatrixType, Derived >::initFactorization().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ bicgstab()

template<typename MatrixType , typename Rhs , typename Dest , typename Preconditioner >
bool Eigen::internal::bicgstab ( const MatrixType &  mat,
const Rhs &  rhs,
Dest &  x,
const Preconditioner &  precond,
Index &  iters,
typename Dest::RealScalar &  tol_error 
)
32{
33 using std::sqrt;
34 using std::abs;
35 typedef typename Dest::RealScalar RealScalar;
36 typedef typename Dest::Scalar Scalar;
37 typedef Matrix<Scalar,Dynamic,1> VectorType;
38 RealScalar tol = tol_error;
39 Index maxIters = iters;
40
41 Index n = mat.cols();
42 VectorType r = rhs - mat * x;
43 VectorType r0 = r;
44
45 RealScalar r0_sqnorm = r0.squaredNorm();
46 RealScalar rhs_sqnorm = rhs.squaredNorm();
47 if(rhs_sqnorm == 0)
48 {
49 x.setZero();
50 return true;
51 }
52 Scalar rho = 1;
53 Scalar alpha = 1;
54 Scalar w = 1;
55
56 VectorType v = VectorType::Zero(n), p = VectorType::Zero(n);
57 VectorType y(n), z(n);
58 VectorType kt(n), ks(n);
59
60 VectorType s(n), t(n);
61
62 RealScalar tol2 = tol*tol*rhs_sqnorm;
64 Index i = 0;
65 Index restarts = 0;
66
67 while ( r.squaredNorm() > tol2 && i<maxIters )
68 {
69 Scalar rho_old = rho;
70
71 rho = r0.dot(r);
72 if (abs(rho) < eps2*r0_sqnorm)
73 {
74 // The new residual vector became too orthogonal to the arbitrarily chosen direction r0
75 // Let's restart with a new r0:
76 r = rhs - mat * x;
77 r0 = r;
78 rho = r0_sqnorm = r.squaredNorm();
79 if(restarts++ == 0)
80 i = 0;
81 }
82 Scalar beta = (rho/rho_old) * (alpha / w);
83 p = r + beta * (p - w * v);
84
85 y = precond.solve(p);
86
87 v.noalias() = mat * y;
88
89 alpha = rho / r0.dot(v);
90 s = r - alpha * v;
91
92 z = precond.solve(s);
93 t.noalias() = mat * z;
94
95 RealScalar tmp = t.squaredNorm();
96 if(tmp>RealScalar(0))
97 w = t.dot(s) / tmp;
98 else
99 w = Scalar(0);
100 x += alpha * y + w * z;
101 r = s - w * t;
102 ++i;
103 }
104 tol_error = sqrt(r.squaredNorm()/rhs_sqnorm);
105 iters = i;
106 return true;
107}
EIGEN_DEVICE_FUNC const SqrtReturnType sqrt() const
Definition ArrayCwiseUnaryOps.h:152
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index cols() const
Definition PlainObjectBase.h:153
Holds information about the various numeric (i.e. scalar) types allowed by Eigen.
Definition NumTraits.h:151

References Eigen::PlainObjectBase< Derived >::cols(), sqrt(), and y.

Referenced by Eigen::BiCGSTAB< _MatrixType, _Preconditioner >::_solve_with_guess_impl().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ blueNorm_impl()

template<typename Derived >
NumTraits< typename traits< Derived >::Scalar >::Real Eigen::internal::blueNorm_impl ( const EigenBase< Derived > &  _vec)
inline
56{
57 typedef typename Derived::RealScalar RealScalar;
58 using std::pow;
59 using std::sqrt;
60 using std::abs;
61 const Derived& vec(_vec.derived());
62 static bool initialized = false;
63 static RealScalar b1, b2, s1m, s2m, rbig, relerr;
64 if(!initialized)
65 {
66 int ibeta, it, iemin, iemax, iexp;
67 RealScalar eps;
68 // This program calculates the machine-dependent constants
69 // bl, b2, slm, s2m, relerr overfl
70 // from the "basic" machine-dependent numbers
71 // nbig, ibeta, it, iemin, iemax, rbig.
72 // The following define the basic machine-dependent constants.
73 // For portability, the PORT subprograms "ilmaeh" and "rlmach"
74 // are used. For any specific computer, each of the assignment
75 // statements can be replaced
76 ibeta = std::numeric_limits<RealScalar>::radix; // base for floating-point numbers
77 it = std::numeric_limits<RealScalar>::digits; // number of base-beta digits in mantissa
78 iemin = std::numeric_limits<RealScalar>::min_exponent; // minimum exponent
79 iemax = std::numeric_limits<RealScalar>::max_exponent; // maximum exponent
80 rbig = (std::numeric_limits<RealScalar>::max)(); // largest floating-point number
81
82 iexp = -((1-iemin)/2);
83 b1 = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // lower boundary of midrange
84 iexp = (iemax + 1 - it)/2;
85 b2 = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // upper boundary of midrange
86
87 iexp = (2-iemin)/2;
88 s1m = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // scaling factor for lower range
89 iexp = - ((iemax+it)/2);
90 s2m = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp))); // scaling factor for upper range
91
92 eps = RealScalar(pow(double(ibeta), 1-it));
93 relerr = sqrt(eps); // tolerance for neglecting asml
94 initialized = true;
95 }
96 Index n = vec.size();
97 RealScalar ab2 = b2 / RealScalar(n);
98 RealScalar asml = RealScalar(0);
99 RealScalar amed = RealScalar(0);
100 RealScalar abig = RealScalar(0);
101 for(typename Derived::InnerIterator it(vec, 0); it; ++it)
102 {
103 RealScalar ax = abs(it.value());
104 if(ax > ab2) abig += numext::abs2(ax*s2m);
105 else if(ax < b1) asml += numext::abs2(ax*s1m);
106 else amed += numext::abs2(ax);
107 }
108 if(amed!=amed)
109 return amed; // we got a NaN
110 if(abig > RealScalar(0))
111 {
112 abig = sqrt(abig);
113 if(abig > rbig) // overflow, or *this contains INF values
114 return abig; // return INF
115 if(amed > RealScalar(0))
116 {
117 abig = abig/s2m;
118 amed = sqrt(amed);
119 }
120 else
121 return abig/s2m;
122 }
123 else if(asml > RealScalar(0))
124 {
125 if (amed > RealScalar(0))
126 {
127 abig = sqrt(amed);
128 amed = sqrt(asml) / s1m;
129 }
130 else
131 return sqrt(asml)/s1m;
132 }
133 else
134 return sqrt(amed);
135 asml = numext::mini(abig, amed);
136 abig = numext::maxi(abig, amed);
137 if(asml <= abig*relerr)
138 return abig;
139 else
140 return abig * sqrt(RealScalar(1) + numext::abs2(asml/abig));
141}
EIGEN_DEVICE_FUNC Derived & derived()
Definition EigenBase.h:45
static BOOLEAN initialized
Definition hid.c:131

References Eigen::EigenBase< Derived >::derived(), initialized, Eigen::numext::maxi(), Eigen::numext::mini(), and sqrt().

Referenced by Eigen::MatrixBase< Derived >::blueNorm(), and Eigen::SparseMatrixBase< Derived >::blueNorm().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ bruteforce_det3_helper()

template<typename Derived >
const Derived::Scalar Eigen::internal::bruteforce_det3_helper ( const MatrixBase< Derived > &  matrix,
int  a,
int  b,
int  c 
)
inline
20{
21 return matrix.coeff(0,a)
22 * (matrix.coeff(1,b) * matrix.coeff(2,c) - matrix.coeff(1,c) * matrix.coeff(2,b));
23}

Referenced by Eigen::internal::determinant_impl< Derived, 3 >::run().

+ Here is the caller graph for this function:

◆ bruteforce_det4_helper()

template<typename Derived >
const Derived::Scalar Eigen::internal::bruteforce_det4_helper ( const MatrixBase< Derived > &  matrix,
int  j,
int  k,
int  m,
int  n 
)
28{
29 return (matrix.coeff(j,0) * matrix.coeff(k,1) - matrix.coeff(k,0) * matrix.coeff(j,1))
30 * (matrix.coeff(m,2) * matrix.coeff(n,3) - matrix.coeff(n,2) * matrix.coeff(m,3));
31}

Referenced by Eigen::internal::determinant_impl< Derived, 4 >::run().

+ Here is the caller graph for this function:

◆ c_to_fortran_numbering()

template<typename MatrixType >
void Eigen::internal::c_to_fortran_numbering ( MatrixType &  mat)
98 {
99 if ( !(mat.outerIndexPtr()[0]) )
100 {
101 int i;
102 for(i = 0; i <= mat.rows(); ++i)
103 ++mat.outerIndexPtr()[i];
104 for(i = 0; i < mat.nonZeros(); ++i)
105 ++mat.innerIndexPtr()[i];
106 }
107 }

Referenced by Eigen::PastixLU< _MatrixType, IsStrSym >::grabMatrix(), Eigen::PastixLLT< _MatrixType, _UpLo >::grabMatrix(), and Eigen::PastixLDLT< _MatrixType, _UpLo >::grabMatrix().

+ Here is the caller graph for this function:

◆ call_assignment() [1/5]

template<typename Dst , typename Src >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Eigen::internal::call_assignment ( const Dst &  dst,
const Src &  src 
)
787{
789}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_assignment(Dst &dst, const Src &src)
Definition AssignEvaluator.h:780
Definition AssignmentFunctors.h:21

References call_assignment().

+ Here is the call graph for this function:

◆ call_assignment() [2/5]

template<typename Dst , typename Src >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Eigen::internal::call_assignment ( Dst &  dst,
const Src &  src 
)

◆ call_assignment() [3/5]

template<typename Dst , typename Src , typename Func >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Eigen::internal::call_assignment ( Dst &  dst,
const Src &  src,
const Func &  func,
typename enable_if< evaluator_assume_aliasing< Src >::value, void * >::type  = 0 
)
795{
796 typename plain_matrix_type<Src>::type tmp(src);
797 call_assignment_no_alias(dst, tmp, func);
798}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_assignment_no_alias(Dst &dst, const Src &src, const Func &func)
Definition AssignEvaluator.h:819
Definition XprHelper.h:275

References call_assignment_no_alias().

+ Here is the call graph for this function:

◆ call_assignment() [4/5]

template<typename Dst , typename Src , typename Func >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Eigen::internal::call_assignment ( Dst &  dst,
const Src &  src,
const Func &  func,
typename enable_if<!evaluator_assume_aliasing< Src >::value, void * >::type  = 0 
)
803{
804 call_assignment_no_alias(dst, src, func);
805}

References call_assignment_no_alias().

+ Here is the call graph for this function:

◆ call_assignment() [5/5]

template<typename Dst , template< typename > class StorageBase, typename Src , typename Func >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Eigen::internal::call_assignment ( NoAlias< Dst, StorageBase > &  dst,
const Src &  src,
const Func &  func 
)
812{
813 call_assignment_no_alias(dst.expression(), src, func);
814}
EIGEN_DEVICE_FUNC ExpressionType & expression() const
Definition NoAlias.h:63

References call_assignment_no_alias(), and Eigen::NoAlias< ExpressionType, StorageBase >::expression().

+ Here is the call graph for this function:

◆ call_assignment_no_alias() [1/2]

template<typename Dst , typename Src >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Eigen::internal::call_assignment_no_alias ( Dst &  dst,
const Src &  src 
)

References call_assignment_no_alias().

+ Here is the call graph for this function:

◆ call_assignment_no_alias() [2/2]

template<typename Dst , typename Src , typename Func >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Eigen::internal::call_assignment_no_alias ( Dst &  dst,
const Src &  src,
const Func &  func 
)
820{
821 enum {
822 NeedToTranspose = ( (int(Dst::RowsAtCompileTime) == 1 && int(Src::ColsAtCompileTime) == 1)
823 || (int(Dst::ColsAtCompileTime) == 1 && int(Src::RowsAtCompileTime) == 1)
824 ) && int(Dst::SizeAtCompileTime) != 1
825 };
826
827 typedef typename internal::conditional<NeedToTranspose, Transpose<Dst>, Dst>::type ActualDstTypeCleaned;
828 typedef typename internal::conditional<NeedToTranspose, Transpose<Dst>, Dst&>::type ActualDstType;
829 ActualDstType actualDst(dst);
830
831 // TODO check whether this is the right place to perform these checks:
833 EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(ActualDstTypeCleaned,Src)
834 EIGEN_CHECK_BINARY_COMPATIBILIY(Func,typename ActualDstTypeCleaned::Scalar,typename Src::Scalar);
835
836 Assignment<ActualDstTypeCleaned,Src,Func>::run(actualDst, src, func);
837}
#define EIGEN_STATIC_ASSERT_LVALUE(Derived)
Definition StaticAssert.h:199
#define EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(TYPE0, TYPE1)
Definition StaticAssert.h:189
#define EIGEN_CHECK_BINARY_COMPATIBILIY(BINOP, LHS, RHS)
Definition XprHelper.h:815
#define Dst
Definition mesh.h:153
Definition Meta.h:58

References EIGEN_CHECK_BINARY_COMPATIBILIY, EIGEN_STATIC_ASSERT_LVALUE, and EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE.

Referenced by Eigen::SparseMatrix< _Scalar, _Options, _StorageIndex >::SparseMatrix(), Eigen::internal::unary_evaluator< Inverse< ArgType > >::unary_evaluator(), Eigen::PlainObjectBase< Derived >::_set_noalias(), Eigen::internal::generic_product_impl< Lhs, Rhs, DenseShape, DenseShape, CoeffBasedProductMode >::addTo(), call_assignment(), call_assignment(), call_assignment(), call_assignment_no_alias(), Eigen::Ref< const TPlainObjectType, Options, StrideType >::construct(), Eigen::internal::generic_product_impl< Lhs, Rhs, DenseShape, DenseShape, CoeffBasedProductMode >::evalTo(), Eigen::DenseBase< Derived >::lazyAssign(), Eigen::SparseMatrixBase< Derived >::operator=(), Eigen::SparseMatrixBase< Derived >::operator=(), Eigen::internal::Assignment< DstXprType, CwiseBinaryOp< internal::scalar_product_op< ScalarBis, Scalar >, const CwiseNullaryOp< internal::scalar_constant_op< ScalarBis >, Plain >, const Product< Lhs, Rhs, DefaultProduct > >, AssignFunc, Dense2Dense >::run(), Eigen::internal::assignment_from_xpr_op_product< DstXprType, OtherXpr, ProductType, Func1, Func2 >::run(), and Eigen::internal::generic_product_impl< Lhs, Rhs, DenseShape, DenseShape, CoeffBasedProductMode >::subTo().

+ Here is the caller graph for this function:

◆ call_assignment_no_alias_no_transpose() [1/2]

template<typename Dst , typename Src >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Eigen::internal::call_assignment_no_alias_no_transpose ( Dst &  dst,
const Src &  src 
)
859{
861}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_assignment_no_alias_no_transpose(Dst &dst, const Src &src, const Func &func)
Definition AssignEvaluator.h:847

References call_assignment_no_alias_no_transpose().

+ Here is the call graph for this function:

◆ call_assignment_no_alias_no_transpose() [2/2]

template<typename Dst , typename Src , typename Func >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Eigen::internal::call_assignment_no_alias_no_transpose ( Dst &  dst,
const Src &  src,
const Func &  func 
)
848{
849 // TODO check whether this is the right place to perform these checks:
852 EIGEN_CHECK_BINARY_COMPATIBILIY(Func,typename Dst::Scalar,typename Src::Scalar);
853
854 Assignment<Dst,Src,Func>::run(dst, src, func);
855}
Definition AssignEvaluator.h:770

References EIGEN_CHECK_BINARY_COMPATIBILIY, EIGEN_STATIC_ASSERT_LVALUE, and EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE.

Referenced by call_assignment_no_alias_no_transpose(), Eigen::SparseSelfAdjointView< MatrixType, _Mode >::operator=(), and Eigen::internal::Assignment< DstXprType, SrcXprType, Functor, SparseSelfAdjoint2Sparse >::run().

+ Here is the caller graph for this function:

◆ call_dense_assignment_loop() [1/2]

template<typename DstXprType , typename SrcXprType >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Eigen::internal::call_dense_assignment_loop ( DstXprType &  dst,
const SrcXprType &  src 
)
746{
748}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_dense_assignment_loop(DstXprType &dst, const SrcXprType &src, const Functor &func)
Definition AssignEvaluator.h:725

References call_dense_assignment_loop().

+ Here is the call graph for this function:

◆ call_dense_assignment_loop() [2/2]

template<typename DstXprType , typename SrcXprType , typename Functor >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Eigen::internal::call_dense_assignment_loop ( DstXprType &  dst,
const SrcXprType &  src,
const Functor &  func 
)
726{
727 typedef evaluator<DstXprType> DstEvaluatorType;
728 typedef evaluator<SrcXprType> SrcEvaluatorType;
729
730 SrcEvaluatorType srcEvaluator(src);
731
732 // NOTE To properly handle A = (A*A.transpose())/s with A rectangular,
733 // we need to resize the destination after the source evaluator has been created.
734 resize_if_allowed(dst, src, func);
735
736 DstEvaluatorType dstEvaluator(dst);
737
739 Kernel kernel(dstEvaluator, srcEvaluator, func, dst.const_cast_derived());
740
742}
Definition AssignEvaluator.h:597
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize_if_allowed(DstXprType &dst, const SrcXprType &src, const Functor &)
Definition AssignEvaluator.h:706
Definition AssignEvaluator.h:313
CGAL::Exact_predicates_inexact_constructions_kernel Kernel
Definition point_areas.cpp:16

References resize_if_allowed().

Referenced by call_dense_assignment_loop(), and Eigen::internal::Assignment< DstXprType, SrcXprType, Functor, Dense2Dense, Weak >::run().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ call_triangular_assignment_loop() [1/2]

template<int Mode, bool SetOpposite, typename DstXprType , typename SrcXprType >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Eigen::internal::call_triangular_assignment_loop ( DstXprType &  dst,
const SrcXprType &  src 
)
813{
814 call_triangular_assignment_loop<Mode,SetOpposite>(dst, src, internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>());
815}

References call_triangular_assignment_loop().

+ Here is the call graph for this function:

◆ call_triangular_assignment_loop() [2/2]

template<int Mode, bool SetOpposite, typename DstXprType , typename SrcXprType , typename Functor >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Eigen::internal::call_triangular_assignment_loop ( DstXprType &  dst,
const SrcXprType &  src,
const Functor &  func 
)
785{
786 typedef evaluator<DstXprType> DstEvaluatorType;
787 typedef evaluator<SrcXprType> SrcEvaluatorType;
788
789 SrcEvaluatorType srcEvaluator(src);
790
791 Index dstRows = src.rows();
792 Index dstCols = src.cols();
793 if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
794 dst.resize(dstRows, dstCols);
795 DstEvaluatorType dstEvaluator(dst);
796
797 typedef triangular_dense_assignment_kernel< Mode&(Lower|Upper),Mode&(UnitDiag|ZeroDiag|SelfAdjoint),SetOpposite,
798 DstEvaluatorType,SrcEvaluatorType,Functor> Kernel;
799 Kernel kernel(dstEvaluator, srcEvaluator, func, dst.const_cast_derived());
800
801 enum {
802 unroll = DstXprType::SizeAtCompileTime != Dynamic
803 && SrcEvaluatorType::CoeffReadCost < HugeCost
804 && DstXprType::SizeAtCompileTime * (DstEvaluatorType::CoeffReadCost+SrcEvaluatorType::CoeffReadCost) / 2 <= EIGEN_UNROLLING_LIMIT
805 };
806
807 triangular_assignment_loop<Kernel, Mode, unroll ? int(DstXprType::SizeAtCompileTime) : Dynamic, SetOpposite>::run(kernel);
808}
#define EIGEN_UNROLLING_LIMIT
Definition Settings.h:24
@ Lower
Definition Constants.h:204
@ Upper
Definition Constants.h:206
const int HugeCost
Definition Constants.h:39
const int Dynamic
Definition Constants.h:21

References call_triangular_assignment_loop(), Eigen::Dynamic, EIGEN_UNROLLING_LIMIT, Eigen::HugeCost, Eigen::Lower, Eigen::SelfAdjoint, Eigen::UnitDiag, Eigen::Upper, and Eigen::ZeroDiag.

Referenced by call_triangular_assignment_loop(), call_triangular_assignment_loop(), and Eigen::TriangularBase< Derived >::evalToLazy().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ cast()

template<typename OldType , typename NewType >
EIGEN_DEVICE_FUNC NewType Eigen::internal::cast ( const OldType &  x)
inline
378{
380}
Definition MathFunctions.h:365

References Eigen::internal::cast_impl< OldType, NewType >::run().

+ Here is the call graph for this function:

◆ check_DenseIndex_is_signed()

static void Eigen::internal::check_DenseIndex_is_signed ( )
inlinestatic
20 {
21 EIGEN_STATIC_ASSERT(NumTraits<DenseIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
22}
#define EIGEN_STATIC_ASSERT(CONDITION, MSG)
Definition StaticAssert.h:124

References EIGEN_STATIC_ASSERT.

◆ check_for_aliasing()

template<typename Dst , typename Src >
void Eigen::internal::check_for_aliasing ( const Dst &  dst,
const Src &  src 
)

References Eigen::internal::checkTransposeAliasing_impl< Derived, OtherDerived, MightHaveTransposeAliasing >::run().

+ Here is the call graph for this function:

◆ check_size_for_overflow()

template<typename T >
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void Eigen::internal::check_size_for_overflow ( std::size_t  size)
287{
288 if(size > std::size_t(-1) / sizeof(T))
290}

References throw_std_bad_alloc().

+ Here is the call graph for this function:

◆ check_static_allocation_size()

template<typename T , int Size>
EIGEN_DEVICE_FUNC void Eigen::internal::check_static_allocation_size ( )
30{
31 // if EIGEN_STACK_ALLOCATION_LIMIT is defined to 0, then no limit
32 #if EIGEN_STACK_ALLOCATION_LIMIT
33 EIGEN_STATIC_ASSERT(Size * sizeof(T) <= EIGEN_STACK_ALLOCATION_LIMIT, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);
34 #endif
35}
#define EIGEN_STACK_ALLOCATION_LIMIT
Definition Macros.h:801

References EIGEN_STACK_ALLOCATION_LIMIT, and EIGEN_STATIC_ASSERT.

◆ check_that_malloc_is_allowed()

EIGEN_DEVICE_FUNC void Eigen::internal::check_that_malloc_is_allowed ( )
inline
147{}

Referenced by aligned_malloc(), and conditional_aligned_malloc< false >().

+ Here is the caller graph for this function:

◆ cofactor_3x3()

template<typename MatrixType , int i, int j>
EIGEN_DEVICE_FUNC MatrixType::Scalar Eigen::internal::cofactor_3x3 ( const MatrixType &  m)
inline
127{
128 enum {
129 i1 = (i+1) % 3,
130 i2 = (i+2) % 3,
131 j1 = (j+1) % 3,
132 j2 = (j+2) % 3
133 };
134 return m.coeff(i1, j1) * m.coeff(i2, j2)
135 - m.coeff(i1, j2) * m.coeff(i2, j1);
136}

◆ cofactor_4x4()

template<typename MatrixType , int i, int j>
EIGEN_DEVICE_FUNC MatrixType::Scalar Eigen::internal::cofactor_4x4 ( const MatrixType &  matrix)
inline
214{
215 enum {
216 i1 = (i+1) % 4,
217 i2 = (i+2) % 4,
218 i3 = (i+3) % 4,
219 j1 = (j+1) % 4,
220 j2 = (j+2) % 4,
221 j3 = (j+3) % 4
222 };
223 return general_det3_helper(matrix, i1, i2, i3, j1, j2, j3)
224 + general_det3_helper(matrix, i2, i3, i1, j1, j2, j3)
225 + general_det3_helper(matrix, i3, i1, i2, j1, j2, j3);
226}

References general_det3_helper().

+ Here is the call graph for this function:

◆ coletree()

template<typename MatrixType , typename IndexVector >
int Eigen::internal::coletree ( const MatrixType &  mat,
IndexVector &  parent,
IndexVector &  firstRowElt,
typename MatrixType::StorageIndex *  perm = 0 
)

Compute the column elimination tree of a sparse matrix

Parameters
matThe matrix in column-major format.
parentThe elimination tree
firstRowEltThe column index of the first element in each row
permThe permutation to apply to the column of mat
62{
63 typedef typename MatrixType::StorageIndex StorageIndex;
64 StorageIndex nc = convert_index<StorageIndex>(mat.cols()); // Number of columns
65 StorageIndex m = convert_index<StorageIndex>(mat.rows());
66 StorageIndex diagSize = (std::min)(nc,m);
67 IndexVector root(nc); // root of subtree of etree
68 root.setZero();
69 IndexVector pp(nc); // disjoint sets
70 pp.setZero(); // Initialize disjoint sets
71 parent.resize(mat.cols());
72 //Compute first nonzero column in each row
73 firstRowElt.resize(m);
74 firstRowElt.setConstant(nc);
75 firstRowElt.segment(0, diagSize).setLinSpaced(diagSize, 0, diagSize-1);
76 bool found_diag;
77 for (StorageIndex col = 0; col < nc; col++)
78 {
79 StorageIndex pcol = col;
80 if(perm) pcol = perm[col];
81 for (typename MatrixType::InnerIterator it(mat, pcol); it; ++it)
82 {
83 Index row = it.row();
84 firstRowElt(row) = (std::min)(firstRowElt(row), col);
85 }
86 }
87 /* Compute etree by Liu's algorithm for symmetric matrices,
88 except use (firstRowElt[r],c) in place of an edge (r,c) of A.
89 Thus each row clique in A'*A is replaced by a star
90 centered at its first vertex, which has the same fill. */
91 StorageIndex rset, cset, rroot;
92 for (StorageIndex col = 0; col < nc; col++)
93 {
94 found_diag = col>=m;
95 pp(col) = col;
96 cset = col;
97 root(cset) = col;
98 parent(col) = nc;
99 /* The diagonal element is treated here even if it does not exist in the matrix
100 * hence the loop is executed once more */
101 StorageIndex pcol = col;
102 if(perm) pcol = perm[col];
103 for (typename MatrixType::InnerIterator it(mat, pcol); it||!found_diag; ++it)
104 { // A sequence of interleaved find and union is performed
105 Index i = col;
106 if(it) i = it.index();
107 if (i == col) found_diag = true;
108
109 StorageIndex row = firstRowElt(i);
110 if (row >= col) continue;
111 rset = internal::etree_find(row, pp); // Find the name of the set containing row
112 rroot = root(rset);
113 if (rroot != col)
114 {
115 parent(rroot) = col;
116 pp(cset) = rset;
117 cset = rset;
118 root(cset) = col;
119 }
120 }
121 }
122 return 0;
123}
EIGEN_DEVICE_FUNC RowXpr row(Index i)
This is the const version of row().
Definition BlockMethods.h:859
EIGEN_DEVICE_FUNC ColXpr col(Index i)
This is the const version of col().
Definition BlockMethods.h:838

References col(), etree_find(), and row().

Referenced by Eigen::SparseQR< _MatrixType, _OrderingType >::analyzePattern(), Eigen::SparseLU< _MatrixType, _OrderingType >::analyzePattern(), and Eigen::SparseQR< _MatrixType, _OrderingType >::factorize().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ compute_inverse_size2_helper()

template<typename MatrixType , typename ResultType >
EIGEN_DEVICE_FUNC void Eigen::internal::compute_inverse_size2_helper ( const MatrixType &  matrix,
const typename ResultType::Scalar &  invdet,
ResultType &  result 
)
inline
79{
80 result.coeffRef(0,0) = matrix.coeff(1,1) * invdet;
81 result.coeffRef(1,0) = -matrix.coeff(1,0) * invdet;
82 result.coeffRef(0,1) = -matrix.coeff(0,1) * invdet;
83 result.coeffRef(1,1) = matrix.coeff(0,0) * invdet;
84}

Referenced by Eigen::internal::compute_inverse_and_det_with_check< MatrixType, ResultType, 2 >::run(), and Eigen::internal::compute_inverse< MatrixType, ResultType, 2 >::run().

+ Here is the caller graph for this function:

◆ compute_inverse_size3_helper()

template<typename MatrixType , typename ResultType >
EIGEN_DEVICE_FUNC void Eigen::internal::compute_inverse_size3_helper ( const MatrixType &  matrix,
const typename ResultType::Scalar &  invdet,
const Matrix< typename ResultType::Scalar, 3, 1 > &  cofactors_col0,
ResultType &  result 
)
inline
145{
146 result.row(0) = cofactors_col0 * invdet;
147 result.coeffRef(1,0) = cofactor_3x3<MatrixType,0,1>(matrix) * invdet;
148 result.coeffRef(1,1) = cofactor_3x3<MatrixType,1,1>(matrix) * invdet;
149 result.coeffRef(1,2) = cofactor_3x3<MatrixType,2,1>(matrix) * invdet;
150 result.coeffRef(2,0) = cofactor_3x3<MatrixType,0,2>(matrix) * invdet;
151 result.coeffRef(2,1) = cofactor_3x3<MatrixType,1,2>(matrix) * invdet;
152 result.coeffRef(2,2) = cofactor_3x3<MatrixType,2,2>(matrix) * invdet;
153}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar & coeffRef(Index rowId, Index colId)
Definition PlainObjectBase.h:183

References Eigen::Matrix< _Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols >::coeffRef().

Referenced by Eigen::internal::compute_inverse_and_det_with_check< MatrixType, ResultType, 3 >::run(), and Eigen::internal::compute_inverse< MatrixType, ResultType, 3 >::run().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ computeFromTridiagonal_impl()

template<typename MatrixType , typename DiagType , typename SubDiagType >
ComputationInfo Eigen::internal::computeFromTridiagonal_impl ( DiagType &  diag,
SubDiagType &  subdiag,
const Index  maxIterations,
bool  computeEigenvectors,
MatrixType &  eivec 
)
483{
484 using std::abs;
485
486 ComputationInfo info;
487 typedef typename MatrixType::Scalar Scalar;
488
489 Index n = diag.size();
490 Index end = n-1;
491 Index start = 0;
492 Index iter = 0; // total number of iterations
493
494 typedef typename DiagType::RealScalar RealScalar;
495 const RealScalar considerAsZero = (std::numeric_limits<RealScalar>::min)();
496 const RealScalar precision = RealScalar(2)*NumTraits<RealScalar>::epsilon();
497
498 while (end>0)
499 {
500 for (Index i = start; i<end; ++i)
501 if (internal::isMuchSmallerThan(abs(subdiag[i]),(abs(diag[i])+abs(diag[i+1])),precision) || abs(subdiag[i]) <= considerAsZero)
502 subdiag[i] = 0;
503
504 // find the largest unreduced block
505 while (end>0 && subdiag[end-1]==RealScalar(0))
506 {
507 end--;
508 }
509 if (end<=0)
510 break;
511
512 // if we spent too many iterations, we give up
513 iter++;
514 if(iter > maxIterations * n) break;
515
516 start = end - 1;
517 while (start>0 && subdiag[start-1]!=0)
518 start--;
519
520 internal::tridiagonal_qr_step<MatrixType::Flags&RowMajorBit ? RowMajor : ColMajor>(diag.data(), subdiag.data(), start, end, computeEigenvectors ? eivec.data() : (Scalar*)0, n);
521 }
522 if (iter <= maxIterations * n)
523 info = Success;
524 else
525 info = NoConvergence;
526
527 // Sort eigenvalues and corresponding vectors.
528 // TODO make the sort optional ?
529 // TODO use a better sort algorithm !!
530 if (info == Success)
531 {
532 for (Index i = 0; i < n-1; ++i)
533 {
534 Index k;
535 diag.segment(i,n-i).minCoeff(&k);
536 if (k > 0)
537 {
538 std::swap(diag[i], diag[k+i]);
539 if(computeEigenvectors)
540 eivec.col(i).swap(eivec.col(k+i));
541 }
542 }
543 }
544 return info;
545}
ComputationInfo
Definition Constants.h:430

References Eigen::end(), isMuchSmallerThan(), Eigen::NoConvergence, and Eigen::Success.

Referenced by Eigen::SelfAdjointEigenSolver< _MatrixType >::compute(), and Eigen::SelfAdjointEigenSolver< _MatrixType >::computeFromTridiagonal().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ computeProductBlockingSizes() [1/2]

template<typename LhsScalar , typename RhsScalar , int KcFactor, typename Index >
void Eigen::internal::computeProductBlockingSizes ( Index &  k,
Index &  m,
Index &  n,
Index  num_threads = 1 
)

Computes the blocking parameters for a m x k times k x n matrix product.

Parameters
[in,out]kInput: the third dimension of the product. Output: the blocking size along the same dimension.
[in,out]mInput: the number of rows of the left hand side. Output: the blocking size along the same dimension.
[in,out]nInput: the number of columns of the right hand side. Output: the blocking size along the same dimension.

Given a m x k times k x n matrix product of scalar types LhsScalar and RhsScalar, this function computes the blocking size parameters along the respective dimensions for matrix products and related algorithms.

The blocking size parameters may be evaluated:

  • either by a heuristic based on cache sizes;
  • or using fixed prescribed values (for testing purposes).
See also
setCpuCacheSizes
298{
299 if (!useSpecificBlockingSizes(k, m, n)) {
300 evaluateProductBlockingSizesHeuristic<LhsScalar, RhsScalar, KcFactor, Index>(k, m, n, num_threads);
301 }
302}
bool useSpecificBlockingSizes(Index &k, Index &m, Index &n)
Definition GeneralBlockPanelKernel.h:263

References useSpecificBlockingSizes().

+ Here is the call graph for this function:

◆ computeProductBlockingSizes() [2/2]

template<typename LhsScalar , typename RhsScalar , typename Index >
void Eigen::internal::computeProductBlockingSizes ( Index &  k,
Index &  m,
Index &  n,
Index  num_threads = 1 
)
inline
306{
307 computeProductBlockingSizes<LhsScalar,RhsScalar,1,Index>(k, m, n, num_threads);
308}

◆ conditional_aligned_delete()

template<typename T , bool Align>
EIGEN_DEVICE_FUNC void Eigen::internal::conditional_aligned_delete ( T *  ptr,
std::size_t  size 
)
inline
341{
342 destruct_elements_of_array<T>(ptr, size);
343 conditional_aligned_free<Align>(ptr);
344}

◆ conditional_aligned_delete_auto()

template<typename T , bool Align>
EIGEN_DEVICE_FUNC void Eigen::internal::conditional_aligned_delete_auto ( T *  ptr,
std::size_t  size 
)
inline
413{
415 destruct_elements_of_array<T>(ptr, size);
416 conditional_aligned_free<Align>(ptr);
417}

◆ conditional_aligned_free()

template<bool Align>
EIGEN_DEVICE_FUNC void Eigen::internal::conditional_aligned_free ( void *  ptr)
inline
229{
230 aligned_free(ptr);
231}

References aligned_free().

+ Here is the call graph for this function:

◆ conditional_aligned_free< false >()

template<>
EIGEN_DEVICE_FUNC void Eigen::internal::conditional_aligned_free< false > ( void *  ptr)
inline
234{
235 std::free(ptr);
236}

◆ conditional_aligned_malloc()

template<bool Align>
EIGEN_DEVICE_FUNC void * Eigen::internal::conditional_aligned_malloc ( std::size_t  size)
inline
213{
214 return aligned_malloc(size);
215}

References aligned_malloc().

+ Here is the call graph for this function:

◆ conditional_aligned_malloc< false >()

template<>
EIGEN_DEVICE_FUNC void * Eigen::internal::conditional_aligned_malloc< false > ( std::size_t  size)
inline
218{
220
221 void *result = std::malloc(size);
222 if(!result && size)
224 return result;
225}

References check_that_malloc_is_allowed(), and throw_std_bad_alloc().

+ Here is the call graph for this function:

◆ conditional_aligned_new()

template<typename T , bool Align>
EIGEN_DEVICE_FUNC T * Eigen::internal::conditional_aligned_new ( std::size_t  size)
inline
313{
314 check_size_for_overflow<T>(size);
315 T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));
317 {
318 return construct_elements_of_array(result, size);
319 }
320 EIGEN_CATCH(...)
321 {
322 conditional_aligned_free<Align>(result);
324 }
325 return result;
326}

References construct_elements_of_array(), EIGEN_CATCH, EIGEN_THROW, and EIGEN_TRY.

+ Here is the call graph for this function:

◆ conditional_aligned_new_auto()

template<typename T , bool Align>
EIGEN_DEVICE_FUNC T * Eigen::internal::conditional_aligned_new_auto ( std::size_t  size)
inline
370{
371 if(size==0)
372 return 0; // short-cut. Also fixes Bug 884
373 check_size_for_overflow<T>(size);
374 T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));
376 {
378 {
379 construct_elements_of_array(result, size);
380 }
381 EIGEN_CATCH(...)
382 {
383 conditional_aligned_free<Align>(result);
385 }
386 }
387 return result;
388}

References construct_elements_of_array(), EIGEN_CATCH, EIGEN_THROW, and EIGEN_TRY.

+ Here is the call graph for this function:

◆ conditional_aligned_realloc()

template<bool Align>
void * Eigen::internal::conditional_aligned_realloc ( void *  ptr,
std::size_t  new_size,
std::size_t  old_size 
)
inline
239{
240 return aligned_realloc(ptr, new_size, old_size);
241}
void * aligned_realloc(void *ptr, std::size_t new_size, std::size_t old_size)
Definition Memory.h:188

References aligned_realloc().

+ Here is the call graph for this function:

◆ conditional_aligned_realloc< false >()

template<>
void * Eigen::internal::conditional_aligned_realloc< false > ( void *  ptr,
std::size_t  new_size,
std::size_t   
)
inline
244{
245 return std::realloc(ptr, new_size);
246}

◆ conditional_aligned_realloc_new()

template<typename T , bool Align>
EIGEN_DEVICE_FUNC T * Eigen::internal::conditional_aligned_realloc_new ( T *  pts,
std::size_t  new_size,
std::size_t  old_size 
)
inline
347{
348 check_size_for_overflow<T>(new_size);
349 check_size_for_overflow<T>(old_size);
350 if(new_size < old_size)
351 destruct_elements_of_array(pts+new_size, old_size-new_size);
352 T *result = reinterpret_cast<T*>(conditional_aligned_realloc<Align>(reinterpret_cast<void*>(pts), sizeof(T)*new_size, sizeof(T)*old_size));
353 if(new_size > old_size)
354 {
356 {
357 construct_elements_of_array(result+old_size, new_size-old_size);
358 }
359 EIGEN_CATCH(...)
360 {
361 conditional_aligned_free<Align>(result);
363 }
364 }
365 return result;
366}
EIGEN_DEVICE_FUNC void destruct_elements_of_array(T *ptr, std::size_t size)
Definition Memory.h:255

References construct_elements_of_array(), destruct_elements_of_array(), EIGEN_CATCH, EIGEN_THROW, and EIGEN_TRY.

+ Here is the call graph for this function:

◆ conditional_aligned_realloc_new_auto()

template<typename T , bool Align>
T * Eigen::internal::conditional_aligned_realloc_new_auto ( T *  pts,
std::size_t  new_size,
std::size_t  old_size 
)
inline
391{
392 check_size_for_overflow<T>(new_size);
393 check_size_for_overflow<T>(old_size);
394 if(NumTraits<T>::RequireInitialization && (new_size < old_size))
395 destruct_elements_of_array(pts+new_size, old_size-new_size);
396 T *result = reinterpret_cast<T*>(conditional_aligned_realloc<Align>(reinterpret_cast<void*>(pts), sizeof(T)*new_size, sizeof(T)*old_size));
397 if(NumTraits<T>::RequireInitialization && (new_size > old_size))
398 {
400 {
401 construct_elements_of_array(result+old_size, new_size-old_size);
402 }
403 EIGEN_CATCH(...)
404 {
405 conditional_aligned_free<Align>(result);
407 }
408 }
409 return result;
410}

References construct_elements_of_array(), destruct_elements_of_array(), EIGEN_CATCH, EIGEN_THROW, and EIGEN_TRY.

+ Here is the call graph for this function:

◆ conjugate_gradient()

template<typename MatrixType , typename Rhs , typename Dest , typename Preconditioner >
EIGEN_DONT_INLINE void Eigen::internal::conjugate_gradient ( const MatrixType &  mat,
const Rhs &  rhs,
Dest &  x,
const Preconditioner &  precond,
Index &  iters,
typename Dest::RealScalar &  tol_error 
)
31{
32 using std::sqrt;
33 using std::abs;
34 typedef typename Dest::RealScalar RealScalar;
35 typedef typename Dest::Scalar Scalar;
36 typedef Matrix<Scalar,Dynamic,1> VectorType;
37
38 RealScalar tol = tol_error;
39 Index maxIters = iters;
40
41 Index n = mat.cols();
42
43 VectorType residual = rhs - mat * x; //initial residual
44
45 RealScalar rhsNorm2 = rhs.squaredNorm();
46 if(rhsNorm2 == 0)
47 {
48 x.setZero();
49 iters = 0;
50 tol_error = 0;
51 return;
52 }
53 const RealScalar considerAsZero = (std::numeric_limits<RealScalar>::min)();
54 RealScalar threshold = numext::maxi(tol*tol*rhsNorm2,considerAsZero);
55 RealScalar residualNorm2 = residual.squaredNorm();
56 if (residualNorm2 < threshold)
57 {
58 iters = 0;
59 tol_error = sqrt(residualNorm2 / rhsNorm2);
60 return;
61 }
62
63 VectorType p(n);
64 p = precond.solve(residual); // initial search direction
65
66 VectorType z(n), tmp(n);
67 RealScalar absNew = numext::real(residual.dot(p)); // the square of the absolute value of r scaled by invM
68 Index i = 0;
69 while(i < maxIters)
70 {
71 tmp.noalias() = mat * p; // the bottleneck of the algorithm
72
73 Scalar alpha = absNew / p.dot(tmp); // the amount we travel on dir
74 x += alpha * p; // update solution
75 residual -= alpha * tmp; // update residual
76
77 residualNorm2 = residual.squaredNorm();
78 if(residualNorm2 < threshold)
79 break;
80
81 z = precond.solve(residual); // approximately solve for "A z = residual"
82
83 RealScalar absOld = absNew;
84 absNew = numext::real(residual.dot(z)); // update the absolute value of r
85 RealScalar beta = absNew / absOld; // calculate the Gram-Schmidt value used to create the new search direction
86 p = z + beta * p; // update search direction
87 i++;
88 }
89 tol_error = sqrt(residualNorm2 / rhsNorm2);
90 iters = i;
91}

References Eigen::PlainObjectBase< Derived >::cols(), Eigen::numext::maxi(), and sqrt().

Referenced by Eigen::ConjugateGradient< _MatrixType, _UpLo, _Preconditioner >::_solve_with_guess_impl().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ conservative_sparse_sparse_product_impl()

template<typename Lhs , typename Rhs , typename ResultType >
static void Eigen::internal::conservative_sparse_sparse_product_impl ( const Lhs &  lhs,
const Rhs &  rhs,
ResultType &  res,
bool  sortedInsertion = false 
)
static
19{
20 typedef typename remove_all<Lhs>::type::Scalar LhsScalar;
21 typedef typename remove_all<Rhs>::type::Scalar RhsScalar;
22 typedef typename remove_all<ResultType>::type::Scalar ResScalar;
23
24 // make sure to call innerSize/outerSize since we fake the storage order.
25 Index rows = lhs.innerSize();
26 Index cols = rhs.outerSize();
27 eigen_assert(lhs.outerSize() == rhs.innerSize());
28
30 ei_declare_aligned_stack_constructed_variable(ResScalar, values, rows, 0);
32
33 std::memset(mask,0,sizeof(bool)*rows);
34
35 evaluator<Lhs> lhsEval(lhs);
36 evaluator<Rhs> rhsEval(rhs);
37
38 // estimate the number of non zero entries
39 // given a rhs column containing Y non zeros, we assume that the respective Y columns
40 // of the lhs differs in average of one non zeros, thus the number of non zeros for
41 // the product of a rhs column with the lhs is X+Y where X is the average number of non zero
42 // per column of the lhs.
43 // Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs)
44 Index estimated_nnz_prod = lhsEval.nonZerosEstimate() + rhsEval.nonZerosEstimate();
45
46 res.setZero();
47 res.reserve(Index(estimated_nnz_prod));
48 // we compute each column of the result, one after the other
49 for (Index j=0; j<cols; ++j)
50 {
51
52 res.startVec(j);
53 Index nnz = 0;
54 for (typename evaluator<Rhs>::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt)
55 {
56 RhsScalar y = rhsIt.value();
57 Index k = rhsIt.index();
58 for (typename evaluator<Lhs>::InnerIterator lhsIt(lhsEval, k); lhsIt; ++lhsIt)
59 {
60 Index i = lhsIt.index();
61 LhsScalar x = lhsIt.value();
62 if(!mask[i])
63 {
64 mask[i] = true;
65 values[i] = x * y;
66 indices[nnz] = i;
67 ++nnz;
68 }
69 else
70 values[i] += x * y;
71 }
72 }
73 if(!sortedInsertion)
74 {
75 // unordered insertion
76 for(Index k=0; k<nnz; ++k)
77 {
78 Index i = indices[k];
79 res.insertBackByOuterInnerUnordered(j,i) = values[i];
80 mask[i] = false;
81 }
82 }
83 else
84 {
85 // alternative ordered insertion code:
86 const Index t200 = rows/11; // 11 == (log2(200)*1.39)
87 const Index t = (rows*100)/139;
88
89 // FIXME reserve nnz non zeros
90 // FIXME implement faster sorting algorithms for very small nnz
91 // if the result is sparse enough => use a quick sort
92 // otherwise => loop through the entire vector
93 // In order to avoid to perform an expensive log2 when the
94 // result is clearly very sparse we use a linear bound up to 200.
95 if((nnz<200 && nnz<t200) || nnz * numext::log2(int(nnz)) < t)
96 {
97 if(nnz>1) std::sort(indices,indices+nnz);
98 for(Index k=0; k<nnz; ++k)
99 {
100 Index i = indices[k];
101 res.insertBackByOuterInner(j,i) = values[i];
102 mask[i] = false;
103 }
104 }
105 else
106 {
107 // dense path
108 for(Index i=0; i<rows; ++i)
109 {
110 if(mask[i])
111 {
112 mask[i] = false;
113 res.insertBackByOuterInner(j,i) = values[i];
114 }
115 }
116 }
117 }
118 }
119 res.finalize();
120}
#define ei_declare_aligned_stack_constructed_variable(TYPE, NAME, SIZE, BUFFER)
Definition Memory.h:644
Definition Meta.h:78
size_t rows(const T &raster)
Definition MarchingSquares.hpp:55

References ei_declare_aligned_stack_constructed_variable, eigen_assert, Eigen::numext::log2(), and y.

+ Here is the call graph for this function:

◆ const_cast_ptr()

template<typename T >
EIGEN_DEVICE_FUNC T * Eigen::internal::const_cast_ptr ( const T *  ptr)
inline
458{
459 return const_cast<T*>(ptr);
460}

◆ construct_elements_of_array()

template<typename T >
EIGEN_DEVICE_FUNC T * Eigen::internal::construct_elements_of_array ( T *  ptr,
std::size_t  size 
)
inline
266{
267 std::size_t i;
269 {
270 for (i = 0; i < size; ++i) ::new (ptr + i) T;
271 return ptr;
272 }
273 EIGEN_CATCH(...)
274 {
275 destruct_elements_of_array(ptr, i);
277 }
278 return NULL;
279}

References destruct_elements_of_array(), EIGEN_CATCH, EIGEN_THROW, and EIGEN_TRY.

Referenced by Eigen::internal::aligned_stack_memory_handler< T >::aligned_stack_memory_handler(), aligned_new(), conditional_aligned_new(), conditional_aligned_new_auto(), conditional_aligned_realloc_new(), and conditional_aligned_realloc_new_auto().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ convert_index()

template<typename IndexDest , typename IndexSrc >
EIGEN_DEVICE_FUNC IndexDest Eigen::internal::convert_index ( const IndexSrc &  idx)
inline
31 {
32 // for sizeof(IndexDest)>=sizeof(IndexSrc) compilers should be able to optimize this away:
33 eigen_internal_assert(idx <= NumTraits<IndexDest>::highest() && "Index value to big for target type");
34 return IndexDest(idx);
35}
#define eigen_internal_assert(x)
Definition Macros.h:585

References eigen_internal_assert.

Referenced by Eigen::internal::AmbiVector< _Scalar, _StorageIndex >::coeffRef(), and Eigen::IncompleteLUT< _Scalar, _StorageIndex >::factorize().

+ Here is the caller graph for this function:

◆ cs_tdfs()

template<typename StorageIndex >
StorageIndex Eigen::internal::cs_tdfs ( StorageIndex  j,
StorageIndex  k,
StorageIndex *  head,
const StorageIndex *  next,
StorageIndex *  post,
StorageIndex *  stack 
)
61{
62 StorageIndex i, p, top = 0;
63 if(!head || !next || !post || !stack) return (-1); /* check inputs */
64 stack[0] = j; /* place j on the stack */
65 while (top >= 0) /* while (stack is not empty) */
66 {
67 p = stack[top]; /* p = top of stack */
68 i = head[p]; /* i = youngest child of p */
69 if(i == -1)
70 {
71 top--; /* p has no unordered children left */
72 post[k++] = p; /* node p is the kth postordered node */
73 }
74 else
75 {
76 head[p] = next[i]; /* remove i from children of p */
77 stack[++top] = i; /* start dfs on child node i */
78 }
79 }
80 return k;
81}
EIGEN_DEVICE_FUNC SegmentReturnType head(Index n)
This is the const version of head(Index).
Definition BlockMethods.h:919

References head().

+ Here is the call graph for this function:

◆ cs_wclear()

template<typename StorageIndex >
static StorageIndex Eigen::internal::cs_wclear ( StorageIndex  mark,
StorageIndex  lemax,
StorageIndex *  w,
StorageIndex  n 
)
static
46{
47 StorageIndex k;
48 if(mark < 2 || (mark + lemax < 0))
49 {
50 for(k = 0; k < n; k++)
51 if(w[k] != 0)
52 w[k] = 1;
53 mark = 2;
54 }
55 return (mark); /* at this point, w[0..n-1] < mark holds */
56}

◆ destruct_elements_of_array()

template<typename T >
EIGEN_DEVICE_FUNC void Eigen::internal::destruct_elements_of_array ( T *  ptr,
std::size_t  size 
)
inline
256{
257 // always destruct an array starting from the end.
258 if(ptr)
259 while(size) ptr[--size].~T();
260}

Referenced by conditional_aligned_realloc_new(), conditional_aligned_realloc_new_auto(), and construct_elements_of_array().

+ Here is the caller graph for this function:

◆ EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT() [1/6]

Eigen::internal::EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT ( add_assign_op  ,
scalar_difference_op  ,
sub_assign_op   
)

◆ EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT() [2/6]

Eigen::internal::EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT ( add_assign_op  ,
scalar_sum_op  ,
add_assign_op   
)

◆ EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT() [3/6]

Eigen::internal::EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT ( assign_op  ,
scalar_difference_op  ,
sub_assign_op   
)

◆ EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT() [4/6]

Eigen::internal::EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT ( assign_op  ,
scalar_sum_op  ,
add_assign_op   
)

◆ EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT() [5/6]

Eigen::internal::EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT ( sub_assign_op  ,
scalar_difference_op  ,
add_assign_op   
)

◆ EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT() [6/6]

Eigen::internal::EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT ( sub_assign_op  ,
scalar_sum_op  ,
sub_assign_op   
)

◆ EIGEN_MATHFUNC_RETVAL() [1/2]

template<typename Scalar >
Eigen::internal::EIGEN_MATHFUNC_RETVAL ( random  ,
Scalar   
)
inline
683{
684 return EIGEN_MATHFUNC_IMPL(random, Scalar)::run();
685}
#define EIGEN_MATHFUNC_IMPL(func, scalar)
Definition MathFunctions.h:68
#define random
Definition unistd.h:29

References EIGEN_MATHFUNC_IMPL, and random.

◆ EIGEN_MATHFUNC_RETVAL() [2/2]

template<typename Scalar >
Eigen::internal::EIGEN_MATHFUNC_RETVAL ( random  ,
Scalar   
) const &
inline

◆ EIGEN_MEMBER_FUNCTOR() [1/13]

Eigen::internal::EIGEN_MEMBER_FUNCTOR ( all  ,
(Size-1) *NumTraits< Scalar >::AddCost   
)

◆ EIGEN_MEMBER_FUNCTOR() [2/13]

Eigen::internal::EIGEN_MEMBER_FUNCTOR ( any  ,
(Size-1) *NumTraits< Scalar >::AddCost   
)

◆ EIGEN_MEMBER_FUNCTOR() [3/13]

Eigen::internal::EIGEN_MEMBER_FUNCTOR ( blueNorm  ,
(Size+5) *NumTraits< Scalar >::MulCost+(Size-1) *NumTraits< Scalar >::AddCost   
)

◆ EIGEN_MEMBER_FUNCTOR() [4/13]

Eigen::internal::EIGEN_MEMBER_FUNCTOR ( count  ,
(Size-1) *NumTraits< Scalar >::AddCost   
)

◆ EIGEN_MEMBER_FUNCTOR() [5/13]

Eigen::internal::EIGEN_MEMBER_FUNCTOR ( hypotNorm  ,
(Size-1) *functor_traits< scalar_hypot_op< Scalar > >::Cost   
)

◆ EIGEN_MEMBER_FUNCTOR() [6/13]

Eigen::internal::EIGEN_MEMBER_FUNCTOR ( maxCoeff  ,
(Size-1) *NumTraits< Scalar >::AddCost   
)

◆ EIGEN_MEMBER_FUNCTOR() [7/13]

Eigen::internal::EIGEN_MEMBER_FUNCTOR ( mean  ,
(Size-1) *NumTraits< Scalar >::AddCost+NumTraits< Scalar >::MulCost   
)

◆ EIGEN_MEMBER_FUNCTOR() [8/13]

Eigen::internal::EIGEN_MEMBER_FUNCTOR ( minCoeff  ,
(Size-1) *NumTraits< Scalar >::AddCost   
)

◆ EIGEN_MEMBER_FUNCTOR() [9/13]

Eigen::internal::EIGEN_MEMBER_FUNCTOR ( norm  ,
(Size+5) *NumTraits< Scalar >::MulCost+(Size-1) *NumTraits< Scalar >::AddCost   
)

◆ EIGEN_MEMBER_FUNCTOR() [10/13]

Eigen::internal::EIGEN_MEMBER_FUNCTOR ( prod  ,
(Size-1) *NumTraits< Scalar >::MulCost   
)

◆ EIGEN_MEMBER_FUNCTOR() [11/13]

Eigen::internal::EIGEN_MEMBER_FUNCTOR ( squaredNorm  ,
Size *NumTraits< Scalar >::MulCost+(Size-1) *NumTraits< Scalar >::AddCost   
)

◆ EIGEN_MEMBER_FUNCTOR() [12/13]

Eigen::internal::EIGEN_MEMBER_FUNCTOR ( stableNorm  ,
(Size+5) *NumTraits< Scalar >::MulCost+(Size-1) *NumTraits< Scalar >::AddCost   
)

◆ EIGEN_MEMBER_FUNCTOR() [13/13]

Eigen::internal::EIGEN_MEMBER_FUNCTOR ( sum  ,
(Size-1) *NumTraits< Scalar >::AddCost   
)

◆ eigen_pastix() [1/4]

void Eigen::internal::eigen_pastix ( pastix_data_t **  pastix_data,
int  pastix_comm,
int  n,
int *  ptr,
int *  idx,
double *  vals,
int *  perm,
int *  invp,
double *  x,
int  nbrhs,
int *  iparm,
double *  dparm 
)
inline
75 {
76 if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
77 if (nbrhs == 0) {x = NULL; nbrhs=1;}
78 d_pastix(pastix_data, pastix_comm, n, ptr, idx, vals, perm, invp, x, nbrhs, iparm, dparm);
79 }

◆ eigen_pastix() [2/4]

void Eigen::internal::eigen_pastix ( pastix_data_t **  pastix_data,
int  pastix_comm,
int  n,
int *  ptr,
int *  idx,
float *  vals,
int *  perm,
int *  invp,
float *  x,
int  nbrhs,
int *  iparm,
double *  dparm 
)
inline
68 {
69 if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
70 if (nbrhs == 0) {x = NULL; nbrhs=1;}
71 s_pastix(pastix_data, pastix_comm, n, ptr, idx, vals, perm, invp, x, nbrhs, iparm, dparm);
72 }

Referenced by Eigen::PastixBase< Derived >::_solve_impl(), Eigen::PastixBase< Derived >::analyzePattern(), Eigen::PastixBase< Derived >::clean(), Eigen::PastixBase< Derived >::factorize(), and Eigen::PastixBase< Derived >::init().

+ Here is the caller graph for this function:

◆ eigen_pastix() [3/4]

void Eigen::internal::eigen_pastix ( pastix_data_t **  pastix_data,
int  pastix_comm,
int  n,
int *  ptr,
int *  idx,
std::complex< double > *  vals,
int *  perm,
int *  invp,
std::complex< double > *  x,
int  nbrhs,
int *  iparm,
double *  dparm 
)
inline
89 {
90 if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
91 if (nbrhs == 0) {x = NULL; nbrhs=1;}
92 z_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast<PASTIX_DCOMPLEX*>(vals), perm, invp, reinterpret_cast<PASTIX_DCOMPLEX*>(x), nbrhs, iparm, dparm);
93 }
#define PASTIX_DCOMPLEX
Definition PaStiXSupport.h:20

References PASTIX_DCOMPLEX.

◆ eigen_pastix() [4/4]

void Eigen::internal::eigen_pastix ( pastix_data_t **  pastix_data,
int  pastix_comm,
int  n,
int *  ptr,
int *  idx,
std::complex< float > *  vals,
int *  perm,
int *  invp,
std::complex< float > *  x,
int  nbrhs,
int *  iparm,
double *  dparm 
)
inline
82 {
83 if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }
84 if (nbrhs == 0) {x = NULL; nbrhs=1;}
85 c_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast<PASTIX_COMPLEX*>(vals), perm, invp, reinterpret_cast<PASTIX_COMPLEX*>(x), nbrhs, iparm, dparm);
86 }
#define PASTIX_COMPLEX
Definition PaStiXSupport.h:19

References PASTIX_COMPLEX.

◆ etree_find()

template<typename Index , typename IndexVector >
Index Eigen::internal::etree_find ( Index  i,
IndexVector &  pp 
)

Find the root of the tree/set containing the vertex i : Use Path halving

41{
42 Index p = pp(i); // Parent
43 Index gp = pp(p); // Grand parent
44 while (gp != p)
45 {
46 pp(i) = gp; // Parent pointer on find path is changed to former grand parent
47 i = gp;
48 p = pp(i);
49 gp = pp(p);
50 }
51 return p;
52}

Referenced by coletree().

+ Here is the caller graph for this function:

◆ evaluateProductBlockingSizesHeuristic()

template<typename LhsScalar , typename RhsScalar , int KcFactor, typename Index >
void Eigen::internal::evaluateProductBlockingSizesHeuristic ( Index k,
Index m,
Index n,
Index  num_threads = 1 
)
94{
96
97 // Explanations:
98 // Let's recall that the product algorithms form mc x kc vertical panels A' on the lhs and
99 // kc x nc blocks B' on the rhs. B' has to fit into L2/L3 cache. Moreover, A' is processed
100 // per mr x kc horizontal small panels where mr is the blocking size along the m dimension
101 // at the register level. This small horizontal panel has to stay within L1 cache.
102 std::ptrdiff_t l1, l2, l3;
103 manage_caching_sizes(GetAction, &l1, &l2, &l3);
104
105 if (num_threads > 1) {
106 typedef typename Traits::ResScalar ResScalar;
107 enum {
108 kdiv = KcFactor * (Traits::mr * sizeof(LhsScalar) + Traits::nr * sizeof(RhsScalar)),
109 ksub = Traits::mr * Traits::nr * sizeof(ResScalar),
110 kr = 8,
111 mr = Traits::mr,
112 nr = Traits::nr
113 };
114 // Increasing k gives us more time to prefetch the content of the "C"
115 // registers. However once the latency is hidden there is no point in
116 // increasing the value of k, so we'll cap it at 320 (value determined
117 // experimentally).
118 const Index k_cache = (numext::mini<Index>)((l1-ksub)/kdiv, 320);
119 if (k_cache < k) {
120 k = k_cache - (k_cache % kr);
122 }
123
124 const Index n_cache = (l2-l1) / (nr * sizeof(RhsScalar) * k);
125 const Index n_per_thread = numext::div_ceil(n, num_threads);
126 if (n_cache <= n_per_thread) {
127 // Don't exceed the capacity of the l2 cache.
128 eigen_internal_assert(n_cache >= static_cast<Index>(nr));
129 n = n_cache - (n_cache % nr);
131 } else {
132 n = (numext::mini<Index>)(n, (n_per_thread + nr - 1) - ((n_per_thread + nr - 1) % nr));
133 }
134
135 if (l3 > l2) {
136 // l3 is shared between all cores, so we'll give each thread its own chunk of l3.
137 const Index m_cache = (l3-l2) / (sizeof(LhsScalar) * k * num_threads);
138 const Index m_per_thread = numext::div_ceil(m, num_threads);
139 if(m_cache < m_per_thread && m_cache >= static_cast<Index>(mr)) {
140 m = m_cache - (m_cache % mr);
142 } else {
143 m = (numext::mini<Index>)(m, (m_per_thread + mr - 1) - ((m_per_thread + mr - 1) % mr));
144 }
145 }
146 }
147 else {
148 // In unit tests we do not want to use extra large matrices,
149 // so we reduce the cache size to check the blocking strategy is not flawed
150#ifdef EIGEN_DEBUG_SMALL_PRODUCT_BLOCKS
151 l1 = 9*1024;
152 l2 = 32*1024;
153 l3 = 512*1024;
154#endif
155
156 // Early return for small problems because the computations below are time consuming for small problems.
157 // Perhaps it would make more sense to consider k*n*m??
158 // Note that for very tiny problem, this function should be bypassed anyway
159 // because we use the coefficient-based implementation for them.
160 if((numext::maxi)(k,(numext::maxi)(m,n))<48)
161 return;
162
163 typedef typename Traits::ResScalar ResScalar;
164 enum {
165 k_peeling = 8,
166 k_div = KcFactor * (Traits::mr * sizeof(LhsScalar) + Traits::nr * sizeof(RhsScalar)),
167 k_sub = Traits::mr * Traits::nr * sizeof(ResScalar)
168 };
169
170 // ---- 1st level of blocking on L1, yields kc ----
171
172 // Blocking on the third dimension (i.e., k) is chosen so that a horizontal panel
173 // of size mr x kc of the lhs plus a vertical panel of kc x nr of the rhs both fit within L1 cache.
174 // We also include a register-level block of the result (mx x nr).
175 // (In an ideal world only the lhs panel would stay in L1)
176 // Moreover, kc has to be a multiple of 8 to be compatible with loop peeling, leading to a maximum blocking size of:
177 const Index max_kc = numext::maxi<Index>(((l1-k_sub)/k_div) & (~(k_peeling-1)),1);
178 const Index old_k = k;
179 if(k>max_kc)
180 {
181 // We are really blocking on the third dimension:
182 // -> reduce blocking size to make sure the last block is as large as possible
183 // while keeping the same number of sweeps over the result.
184 k = (k%max_kc)==0 ? max_kc
185 : max_kc - k_peeling * ((max_kc-1-(k%max_kc))/(k_peeling*(k/max_kc+1)));
186
187 eigen_internal_assert(((old_k/k) == (old_k/max_kc)) && "the number of sweeps has to remain the same");
188 }
189
190 // ---- 2nd level of blocking on max(L2,L3), yields nc ----
191
192 // TODO find a reliable way to get the actual amount of cache per core to use for 2nd level blocking, that is:
193 // actual_l2 = max(l2, l3/nb_core_sharing_l3)
194 // The number below is quite conservative: it is better to underestimate the cache size rather than to overestimate it.
195 // For instance, it corresponds to 6MB of L3 shared among 4 cores.
196 #ifdef EIGEN_DEBUG_SMALL_PRODUCT_BLOCKS
197 const Index actual_l2 = l3;
198 #else
199 const Index actual_l2 = 1572864; // == 1.5 MB
200 #endif
201
202 // Here, nc is chosen such that a block of kc x nc of the rhs fit within half of L2.
203 // The second half is implicitly reserved to access the result and lhs coefficients.
204 // When k<max_kc, then nc can grow arbitrarily. In practice, it seems to be fruitful
205 // to limit this growth: we bound nc to grow by a factor of x1.5.
206 // However, if the entire lhs block fit within L1, then we are not going to block on the rows at all,
207 // and it becomes fruitful to keep the packed rhs blocks in L1 if there is enough remaining space.
208 Index max_nc;
209 const Index lhs_bytes = m * k * sizeof(LhsScalar);
210 const Index remaining_l1 = l1- k_sub - lhs_bytes;
211 if(remaining_l1 >= Index(Traits::nr*sizeof(RhsScalar))*k)
212 {
213 // L1 blocking
214 max_nc = remaining_l1 / (k*sizeof(RhsScalar));
215 }
216 else
217 {
218 // L2 blocking
219 max_nc = (3*actual_l2)/(2*2*max_kc*sizeof(RhsScalar));
220 }
221 // WARNING Below, we assume that Traits::nr is a power of two.
222 Index nc = numext::mini<Index>(actual_l2/(2*k*sizeof(RhsScalar)), max_nc) & (~(Traits::nr-1));
223 if(n>nc)
224 {
225 // We are really blocking over the columns:
226 // -> reduce blocking size to make sure the last block is as large as possible
227 // while keeping the same number of sweeps over the packed lhs.
228 // Here we allow one more sweep if this gives us a perfect match, thus the commented "-1"
229 n = (n%nc)==0 ? nc
230 : (nc - Traits::nr * ((nc/*-1*/-(n%nc))/(Traits::nr*(n/nc+1))));
231 }
232 else if(old_k==k)
233 {
234 // So far, no blocking at all, i.e., kc==k, and nc==n.
235 // In this case, let's perform a blocking over the rows such that the packed lhs data is kept in cache L1/L2
236 // TODO: part of this blocking strategy is now implemented within the kernel itself, so the L1-based heuristic here should be obsolete.
237 Index problem_size = k*n*sizeof(LhsScalar);
238 Index actual_lm = actual_l2;
239 Index max_mc = m;
240 if(problem_size<=1024)
241 {
242 // problem is small enough to keep in L1
243 // Let's choose m such that lhs's block fit in 1/3 of L1
244 actual_lm = l1;
245 }
246 else if(l3!=0 && problem_size<=32768)
247 {
248 // we have both L2 and L3, and problem is small enough to be kept in L2
249 // Let's choose m such that lhs's block fit in 1/3 of L2
250 actual_lm = l2;
251 max_mc = (numext::mini<Index>)(576,max_mc);
252 }
253 Index mc = (numext::mini<Index>)(actual_lm/(3*k*sizeof(LhsScalar)), max_mc);
254 if (mc > Traits::mr) mc -= mc % Traits::mr;
255 else if (mc==0) return;
256 m = (m%mc)==0 ? mc
257 : (mc - Traits::mr * ((mc/*-1*/-(m%mc))/(Traits::mr*(m/mc+1))));
258 }
259 }
260}
Definition GeneralBlockPanelKernel.h:352
void manage_caching_sizes(Action action, std::ptrdiff_t *l1, std::ptrdiff_t *l2, std::ptrdiff_t *l3)
Definition GeneralBlockPanelKernel.h:55
T l2(const boost::geometry::model::d2::point_xy< T > &v)
Definition ExtrusionSimulator.cpp:166
CGAL::AABB_traits< EpicKernel, Primitive > Traits
Definition CutSurface.cpp:2730

References Eigen::numext::div_ceil(), eigen_internal_assert, Eigen::GetAction, manage_caching_sizes(), and Eigen::numext::maxi().

+ Here is the call graph for this function:

◆ extract_data()

◆ first_aligned() [1/2]

template<int Alignment, typename Derived >
static Index Eigen::internal::first_aligned ( const DenseBase< Derived > &  m)
inlinestatic
640{
641 enum { ReturnZero = (int(evaluator<Derived>::Alignment) >= Alignment) || !(Derived::Flags & DirectAccessBit) };
642 return first_aligned_impl<Alignment, Derived, ReturnZero>::run(m.derived());
643}

References Eigen::DirectAccessBit, and Eigen::internal::first_aligned_impl< Alignment, Derived, JustReturnZero >::run().

Referenced by first_default_aligned().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ first_aligned() [2/2]

template<int Alignment, typename Scalar , typename Index >
EIGEN_DEVICE_FUNC Index Eigen::internal::first_aligned ( const Scalar *  array,
Index  size 
)
inline
440{
441 const Index ScalarSize = sizeof(Scalar);
442 const Index AlignmentSize = Alignment / ScalarSize;
443 const Index AlignmentMask = AlignmentSize-1;
444
445 if(AlignmentSize<=1)
446 {
447 // Either the requested alignment is smaller than a scalar, or it exactly matches a single scalar,
448 // so that all elements of the array have the same alignment.
449 return 0;
450 }
451 else if( (UIntPtr(array) & (sizeof(Scalar)-1)) || (Alignment%ScalarSize)!=0)
452 {
453 // The array is not aligned to the size of a single scalar, or the requested alignment is not a multiple of the scalar size.
454 // Consequently, no element of the array is well aligned.
455 return size;
456 }
457 else
458 {
459 Index first = (AlignmentSize - (Index((UIntPtr(array)/sizeof(Scalar))) & AlignmentMask)) & AlignmentMask;
460 return (first < size) ? first : size;
461 }
462}

◆ first_default_aligned() [1/2]

template<typename Derived >
static Index Eigen::internal::first_default_aligned ( const DenseBase< Derived > &  m)
inlinestatic

◆ first_default_aligned() [2/2]

template<typename Scalar , typename Index >
EIGEN_DEVICE_FUNC Index Eigen::internal::first_default_aligned ( const Scalar *  array,
Index  size 
)
inline
468{
469 typedef typename packet_traits<Scalar>::type DefaultPacketType;
470 return first_aligned<unpacket_traits<DefaultPacketType>::alignment>(array, size);
471}

◆ first_multiple()

template<typename Index >
Index Eigen::internal::first_multiple ( Index  size,
Index  base 
)
inline
477{
478 return ((size+base-1)/base)*base;
479}

Referenced by Eigen::internal::LU_kernel_bmod< SegSizeAtCompileTime >::run().

+ Here is the caller graph for this function:

◆ fortran_to_c_numbering()

template<typename MatrixType >
void Eigen::internal::fortran_to_c_numbering ( MatrixType &  mat)
112 {
113 // Check the Numbering
114 if ( mat.outerIndexPtr()[0] == 1 )
115 { // Convert to C-style numbering
116 int i;
117 for(i = 0; i <= mat.rows(); ++i)
118 --mat.outerIndexPtr()[i];
119 for(i = 0; i < mat.nonZeros(); ++i)
120 --mat.innerIndexPtr()[i];
121 }
122 }

◆ gebp_madd()

template<typename CJ , typename A , typename B , typename C , typename T >
EIGEN_STRONG_INLINE void Eigen::internal::gebp_madd ( const CJ &  cj,
A &  a,
B &  b,
C &  c,
T &  t 
)
332 {
334 }
Definition GeneralBlockPanelKernel.h:316

References Eigen::internal::gebp_madd_selector< CJ, A, B, C, T >::run().

+ Here is the call graph for this function:

◆ general_det3_helper()

template<typename Derived >
EIGEN_DEVICE_FUNC const Derived::Scalar Eigen::internal::general_det3_helper ( const MatrixBase< Derived > &  matrix,
int  i1,
int  i2,
int  i3,
int  j1,
int  j2,
int  j3 
)
inline
206{
207 return matrix.coeff(i1,j1)
208 * (matrix.coeff(i2,j2) * matrix.coeff(i3,j3) - matrix.coeff(i2,j3) * matrix.coeff(i3,j2));
209}

Referenced by cofactor_4x4().

+ Here is the caller graph for this function:

◆ generic_fast_tanh_float()

template<typename T >
T Eigen::internal::generic_fast_tanh_float ( const T &  a_x)
27{
28 // Clamp the inputs to the range [-9, 9] since anything outside
29 // this range is +/-1.0f in single-precision.
30 const T plus_9 = pset1<T>(9.f);
31 const T minus_9 = pset1<T>(-9.f);
32 // NOTE GCC prior to 6.3 might improperly optimize this max/min
33 // step such that if a_x is nan, x will be either 9 or -9,
34 // and tanh will return 1 or -1 instead of nan.
35 // This is supposed to be fixed in gcc6.3,
36 // see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
37 const T x = pmax(minus_9,pmin(plus_9,a_x));
38 // The monomial coefficients of the numerator polynomial (odd).
39 const T alpha_1 = pset1<T>(4.89352455891786e-03f);
40 const T alpha_3 = pset1<T>(6.37261928875436e-04f);
41 const T alpha_5 = pset1<T>(1.48572235717979e-05f);
42 const T alpha_7 = pset1<T>(5.12229709037114e-08f);
43 const T alpha_9 = pset1<T>(-8.60467152213735e-11f);
44 const T alpha_11 = pset1<T>(2.00018790482477e-13f);
45 const T alpha_13 = pset1<T>(-2.76076847742355e-16f);
46
47 // The monomial coefficients of the denominator polynomial (even).
48 const T beta_0 = pset1<T>(4.89352518554385e-03f);
49 const T beta_2 = pset1<T>(2.26843463243900e-03f);
50 const T beta_4 = pset1<T>(1.18534705686654e-04f);
51 const T beta_6 = pset1<T>(1.19825839466702e-06f);
52
53 // Since the polynomials are odd/even, we need x^2.
54 const T x2 = pmul(x, x);
55
56 // Evaluate the numerator polynomial p.
57 T p = pmadd(x2, alpha_13, alpha_11);
58 p = pmadd(x2, p, alpha_9);
59 p = pmadd(x2, p, alpha_7);
60 p = pmadd(x2, p, alpha_5);
61 p = pmadd(x2, p, alpha_3);
62 p = pmadd(x2, p, alpha_1);
63 p = pmul(x, p);
64
65 // Evaluate the denominator polynomial q.
66 T q = pmadd(x2, beta_6, beta_4);
67 q = pmadd(x2, q, beta_2);
68 q = pmadd(x2, q, beta_0);
69
70 // Divide the numerator by the denominator.
71 return pdiv(p, q);
72}
EIGEN_DEVICE_FUNC Packet pdiv(const Packet &a, const Packet &b)
Definition GenericPacketMath.h:175
EIGEN_DEVICE_FUNC Packet pmax(const Packet &a, const Packet &b)
Definition GenericPacketMath.h:185
EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f &a, const Packet4f &b, const Packet4f &c)
Definition PacketMath.h:388
EIGEN_DEVICE_FUNC Packet pmul(const Packet &a, const Packet &b)
Definition GenericPacketMath.h:170
EIGEN_DEVICE_FUNC Packet pmin(const Packet &a, const Packet &b)
Definition GenericPacketMath.h:180

References pdiv(), pmadd(), pmax(), pmin(), and pmul().

Referenced by ptanh< Packet4f >(), and ptanh< Packet8f >().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ GetMarketLine() [1/2]

template<typename Scalar , typename IndexType >
bool Eigen::internal::GetMarketLine ( std::stringstream &  line,
IndexType &  M,
IndexType &  N,
IndexType &  i,
IndexType &  j,
Scalar &  value 
)
inline
22 {
23 line >> i >> j >> value;
24 i--;
25 j--;
26 if(i>=0 && j>=0 && i<M && j<N)
27 {
28 return true;
29 }
30 else
31 return false;
32 }

Referenced by Eigen::loadMarket().

+ Here is the caller graph for this function:

◆ GetMarketLine() [2/2]

template<typename Scalar , typename IndexType >
bool Eigen::internal::GetMarketLine ( std::stringstream &  line,
IndexType &  M,
IndexType &  N,
IndexType &  i,
IndexType &  j,
std::complex< Scalar > &  value 
)
inline
35 {
36 Scalar valR, valI;
37 line >> i >> j >> valR >> valI;
38 i--;
39 j--;
40 if(i>=0 && j>=0 && i<M && j<N)
41 {
42 value = std::complex<Scalar>(valR, valI);
43 return true;
44 }
45 else
46 return false;
47 }

◆ GetVectorElt() [1/2]

template<typename RealScalar >
void Eigen::internal::GetVectorElt ( const std::string &  line,
RealScalar &  val 
)
inline
51 {
52 std::istringstream newline(line);
53 newline >> val;
54 }

Referenced by Eigen::loadMarketVector().

+ Here is the caller graph for this function:

◆ GetVectorElt() [2/2]

template<typename RealScalar >
void Eigen::internal::GetVectorElt ( const std::string &  line,
std::complex< RealScalar > &  val 
)
inline
58 {
59 RealScalar valR, valI;
60 std::istringstream newline(line);
61 newline >> valR >> valI;
62 val = std::complex<RealScalar>(valR, valI);
63 }

◆ handmade_aligned_free()

void Eigen::internal::handmade_aligned_free ( void *  ptr)
inline
97{
98 if (ptr) std::free(*(reinterpret_cast<void**>(ptr) - 1));
99}

Referenced by aligned_free().

+ Here is the caller graph for this function:

◆ handmade_aligned_malloc()

void * Eigen::internal::handmade_aligned_malloc ( std::size_t  size)
inline
87{
88 void *original = std::malloc(size+EIGEN_DEFAULT_ALIGN_BYTES);
89 if (original == 0) return 0;
90 void *aligned = reinterpret_cast<void*>((reinterpret_cast<std::size_t>(original) & ~(std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1))) + EIGEN_DEFAULT_ALIGN_BYTES);
91 *(reinterpret_cast<void**>(aligned) - 1) = original;
92 return aligned;
93}
#define EIGEN_DEFAULT_ALIGN_BYTES
Definition Macros.h:781

References EIGEN_DEFAULT_ALIGN_BYTES.

Referenced by aligned_malloc(), and handmade_aligned_realloc().

+ Here is the caller graph for this function:

◆ handmade_aligned_realloc()

void * Eigen::internal::handmade_aligned_realloc ( void *  ptr,
std::size_t  size,
std::size_t  = 0 
)
inline
107{
108 if (ptr == 0) return handmade_aligned_malloc(size);
109 void *original = *(reinterpret_cast<void**>(ptr) - 1);
110 std::ptrdiff_t previous_offset = static_cast<char *>(ptr)-static_cast<char *>(original);
111 original = std::realloc(original,size+EIGEN_DEFAULT_ALIGN_BYTES);
112 if (original == 0) return 0;
113 void *aligned = reinterpret_cast<void*>((reinterpret_cast<std::size_t>(original) & ~(std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1))) + EIGEN_DEFAULT_ALIGN_BYTES);
114 void *previous_aligned = static_cast<char *>(original)+previous_offset;
115 if(aligned!=previous_aligned)
116 std::memmove(aligned, previous_aligned, size);
117
118 *(reinterpret_cast<void**>(aligned) - 1) = original;
119 return aligned;
120}

References EIGEN_DEFAULT_ALIGN_BYTES, and handmade_aligned_malloc().

Referenced by aligned_realloc().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ householder_qr_inplace_unblocked()

template<typename MatrixQR , typename HCoeffs >
void Eigen::internal::householder_qr_inplace_unblocked ( MatrixQR &  mat,
HCoeffs &  hCoeffs,
typename MatrixQR::Scalar *  tempData = 0 
)
257{
258 typedef typename MatrixQR::Scalar Scalar;
259 typedef typename MatrixQR::RealScalar RealScalar;
260 Index rows = mat.rows();
261 Index cols = mat.cols();
262 Index size = (std::min)(rows,cols);
263
264 eigen_assert(hCoeffs.size() == size);
265
267 TempType tempVector;
268 if(tempData==0)
269 {
270 tempVector.resize(cols);
271 tempData = tempVector.data();
272 }
273
274 for(Index k = 0; k < size; ++k)
275 {
276 Index remainingRows = rows - k;
277 Index remainingCols = cols - k - 1;
278
279 RealScalar beta;
280 mat.col(k).tail(remainingRows).makeHouseholderInPlace(hCoeffs.coeffRef(k), beta);
281 mat.coeffRef(k,k) = beta;
282
283 // apply H to remaining part of m_qr from the left
284 mat.bottomRightCorner(remainingRows, remainingCols)
285 .applyHouseholderOnTheLeft(mat.col(k).tail(remainingRows-1), hCoeffs.coeffRef(k), tempData+k+1);
286 }
287}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize(Index rows, Index cols)
Definition PlainObjectBase.h:279

References eigen_assert, and Eigen::PlainObjectBase< Derived >::resize().

Referenced by Eigen::internal::householder_qr_inplace_blocked< MatrixQR, HCoeffs, MatrixQRScalar, InnerStrideIsOne >::run().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ ignore_unused_variable()

template<typename T >
EIGEN_DEVICE_FUNC void Eigen::internal::ignore_unused_variable ( const T &  )
615{}

◆ is_same_dense() [1/2]

template<typename T1 , typename T2 >
bool Eigen::internal::is_same_dense ( const T1 &  ,
const T2 &  ,
typename enable_if<!(has_direct_access< T1 >::ret &&has_direct_access< T2 >::ret), T1 >::type *  = 0 
)
668{
669 return false;
670}

◆ is_same_dense() [2/2]

template<typename T1 , typename T2 >
bool Eigen::internal::is_same_dense ( const T1 &  mat1,
const T2 &  mat2,
typename enable_if< has_direct_access< T1 >::ret &&has_direct_access< T2 >::ret, T1 >::type *  = 0 
)
662{
663 return (mat1.data()==mat2.data()) && (mat1.innerStride()==mat2.innerStride()) && (mat1.outerStride()==mat2.outerStride());
664}

Referenced by Eigen::LLT< _MatrixType, _UpLo >::compute(), Eigen::internal::triangular_solve_retval< Side, TriangularType, Rhs >::evalTo(), Eigen::HouseholderSequence< VectorsType, CoeffsType, Side >::evalTo(), Eigen::internal::permutation_matrix_product< ExpressionType, Side, Transposed, DenseShape >::run(), and Eigen::internal::transposition_matrix_product< ExpressionType, Side, Transposed, ExpressionShape >::run().

+ Here is the caller graph for this function:

◆ isApprox()

template<typename Scalar >
EIGEN_DEVICE_FUNC bool Eigen::internal::isApprox ( const Scalar &  x,
const Scalar &  y,
const typename NumTraits< Scalar >::Real &  precision = NumTraits<Scalar>::dummy_precision() 
)
inline

◆ isApproxOrLessThan()

template<typename Scalar >
EIGEN_DEVICE_FUNC bool Eigen::internal::isApproxOrLessThan ( const Scalar &  x,
const Scalar &  y,
const typename NumTraits< Scalar >::Real &  precision = NumTraits<Scalar>::dummy_precision() 
)
inline
1370{
1371 return scalar_fuzzy_impl<Scalar>::isApproxOrLessThan(x, y, precision);
1372}

References y.

◆ isfinite_impl() [1/3]

template<typename T >
EIGEN_DEVICE_FUNC bool Eigen::internal::isfinite_impl ( const std::complex< T > &  x)
1267{
1268 return (numext::isfinite)(numext::real(x)) && (numext::isfinite)(numext::imag(x));
1269}

References Eigen::numext::isfinite().

+ Here is the call graph for this function:

◆ isfinite_impl() [2/3]

template<typename T >
EIGEN_DEVICE_FUNC internal::enable_if< internal::is_integral< T >::value, bool >::type Eigen::internal::isfinite_impl ( const T &  )
709{ return true; }

Referenced by Eigen::numext::isfinite().

+ Here is the caller graph for this function:

◆ isfinite_impl() [3/3]

template<typename T >
EIGEN_DEVICE_FUNC internal::enable_if<(!internal::is_integral< T >::value)&&(!NumTraits< T >::IsComplex), bool >::type Eigen::internal::isfinite_impl ( const T &  x)
715{
716 #ifdef __CUDA_ARCH__
717 return (::isfinite)(x);
718 #elif EIGEN_USE_STD_FPCLASSIFY
719 using std::isfinite;
720 return isfinite EIGEN_NOT_A_MACRO (x);
721 #else
722 return x<=NumTraits<T>::highest() && x>=NumTraits<T>::lowest();
723 #endif
724}
#define EIGEN_NOT_A_MACRO
Definition Macros.h:327

References EIGEN_NOT_A_MACRO.

◆ isinf_impl() [1/3]

template<typename T >
EIGEN_DEVICE_FUNC bool Eigen::internal::isinf_impl ( const std::complex< T > &  x)
1279{
1280 return ((numext::isinf)(numext::real(x)) || (numext::isinf)(numext::imag(x))) && (!(numext::isnan)(x));
1281}

References Eigen::numext::isinf(), and Eigen::numext::isnan().

+ Here is the call graph for this function:

◆ isinf_impl() [2/3]

template<typename T >
EIGEN_DEVICE_FUNC internal::enable_if< internal::is_integral< T >::value, bool >::type Eigen::internal::isinf_impl ( const T &  )
704{ return false; }

Referenced by Eigen::numext::isinf().

+ Here is the caller graph for this function:

◆ isinf_impl() [3/3]

template<typename T >
EIGEN_DEVICE_FUNC internal::enable_if<(!internal::is_integral< T >::value)&&(!NumTraits< T >::IsComplex), bool >::type Eigen::internal::isinf_impl ( const T &  x)
730{
731 #ifdef __CUDA_ARCH__
732 return (::isinf)(x);
733 #elif EIGEN_USE_STD_FPCLASSIFY
734 using std::isinf;
735 return isinf EIGEN_NOT_A_MACRO (x);
736 #else
737 return x>NumTraits<T>::highest() || x<NumTraits<T>::lowest();
738 #endif
739}

References EIGEN_NOT_A_MACRO.

◆ isMuchSmallerThan()

template<typename Scalar , typename OtherScalar >
EIGEN_DEVICE_FUNC bool Eigen::internal::isMuchSmallerThan ( const Scalar &  x,
const OtherScalar &  y,
const typename NumTraits< Scalar >::Real &  precision = NumTraits<Scalar>::dummy_precision() 
)
inline

◆ isnan_impl() [1/3]

template<typename T >
EIGEN_DEVICE_FUNC bool Eigen::internal::isnan_impl ( const std::complex< T > &  x)
1273{
1274 return (numext::isnan)(numext::real(x)) || (numext::isnan)(numext::imag(x));
1275}

References Eigen::numext::isnan().

+ Here is the call graph for this function:

◆ isnan_impl() [2/3]

template<typename T >
EIGEN_DEVICE_FUNC internal::enable_if< internal::is_integral< T >::value, bool >::type Eigen::internal::isnan_impl ( const T &  )
699{ return false; }

Referenced by Eigen::numext::isnan().

+ Here is the caller graph for this function:

◆ isnan_impl() [3/3]

template<typename T >
EIGEN_DEVICE_FUNC internal::enable_if<(!internal::is_integral< T >::value)&&(!NumTraits< T >::IsComplex), bool >::type Eigen::internal::isnan_impl ( const T &  x)
745{
746 #ifdef __CUDA_ARCH__
747 return (::isnan)(x);
748 #elif EIGEN_USE_STD_FPCLASSIFY
749 using std::isnan;
750 return isnan EIGEN_NOT_A_MACRO (x);
751 #else
752 return x != x;
753 #endif
754}

References EIGEN_NOT_A_MACRO.

◆ least_square_conjugate_gradient()

template<typename MatrixType , typename Rhs , typename Dest , typename Preconditioner >
EIGEN_DONT_INLINE void Eigen::internal::least_square_conjugate_gradient ( const MatrixType &  mat,
const Rhs &  rhs,
Dest &  x,
const Preconditioner &  precond,
Index iters,
typename Dest::RealScalar &  tol_error 
)
31{
32 using std::sqrt;
33 using std::abs;
34 typedef typename Dest::RealScalar RealScalar;
35 typedef typename Dest::Scalar Scalar;
36 typedef Matrix<Scalar,Dynamic,1> VectorType;
37
38 RealScalar tol = tol_error;
39 Index maxIters = iters;
40
41 Index m = mat.rows(), n = mat.cols();
42
43 VectorType residual = rhs - mat * x;
44 VectorType normal_residual = mat.adjoint() * residual;
45
46 RealScalar rhsNorm2 = (mat.adjoint()*rhs).squaredNorm();
47 if(rhsNorm2 == 0)
48 {
49 x.setZero();
50 iters = 0;
51 tol_error = 0;
52 return;
53 }
54 RealScalar threshold = tol*tol*rhsNorm2;
55 RealScalar residualNorm2 = normal_residual.squaredNorm();
56 if (residualNorm2 < threshold)
57 {
58 iters = 0;
59 tol_error = sqrt(residualNorm2 / rhsNorm2);
60 return;
61 }
62
63 VectorType p(n);
64 p = precond.solve(normal_residual); // initial search direction
65
66 VectorType z(n), tmp(m);
67 RealScalar absNew = numext::real(normal_residual.dot(p)); // the square of the absolute value of r scaled by invM
68 Index i = 0;
69 while(i < maxIters)
70 {
71 tmp.noalias() = mat * p;
72
73 Scalar alpha = absNew / tmp.squaredNorm(); // the amount we travel on dir
74 x += alpha * p; // update solution
75 residual -= alpha * tmp; // update residual
76 normal_residual = mat.adjoint() * residual; // update residual of the normal equation
77
78 residualNorm2 = normal_residual.squaredNorm();
79 if(residualNorm2 < threshold)
80 break;
81
82 z = precond.solve(normal_residual); // approximately solve for "A'A z = normal_residual"
83
84 RealScalar absOld = absNew;
85 absNew = numext::real(normal_residual.dot(z)); // update the absolute value of r
86 RealScalar beta = absNew / absOld; // calculate the Gram-Schmidt value used to create the new search direction
87 p = z + beta * p; // update search direction
88 i++;
89 }
90 tol_error = sqrt(residualNorm2 / rhsNorm2);
91 iters = i;
92}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rows() const
Definition PlainObjectBase.h:151

References Eigen::PlainObjectBase< Derived >::rows(), and sqrt().

Referenced by Eigen::LeastSquaresConjugateGradient< _MatrixType, _Preconditioner >::_solve_with_guess_impl().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ llt_rank_update_lower()

template<typename MatrixType , typename VectorType >
static Index Eigen::internal::llt_rank_update_lower ( MatrixType &  mat,
const VectorType &  vec,
const typename MatrixType::RealScalar &  sigma 
)
static
234{
235 using std::sqrt;
236 typedef typename MatrixType::Scalar Scalar;
237 typedef typename MatrixType::RealScalar RealScalar;
238 typedef typename MatrixType::ColXpr ColXpr;
239 typedef typename internal::remove_all<ColXpr>::type ColXprCleaned;
240 typedef typename ColXprCleaned::SegmentReturnType ColXprSegment;
241 typedef Matrix<Scalar,Dynamic,1> TempVectorType;
242 typedef typename TempVectorType::SegmentReturnType TempVecSegment;
243
244 Index n = mat.cols();
245 eigen_assert(mat.rows()==n && vec.size()==n);
246
247 TempVectorType temp;
248
249 if(sigma>0)
250 {
251 // This version is based on Givens rotations.
252 // It is faster than the other one below, but only works for updates,
253 // i.e., for sigma > 0
254 temp = sqrt(sigma) * vec;
255
256 for(Index i=0; i<n; ++i)
257 {
259 g.makeGivens(mat(i,i), -temp(i), &mat(i,i));
260
261 Index rs = n-i-1;
262 if(rs>0)
263 {
264 ColXprSegment x(mat.col(i).tail(rs));
265 TempVecSegment y(temp.tail(rs));
267 }
268 }
269 }
270 else
271 {
272 temp = vec;
273 RealScalar beta = 1;
274 for(Index j=0; j<n; ++j)
275 {
276 RealScalar Ljj = numext::real(mat.coeff(j,j));
277 RealScalar dj = numext::abs2(Ljj);
278 Scalar wj = temp.coeff(j);
279 RealScalar swj2 = sigma*numext::abs2(wj);
280 RealScalar gamma = dj*beta + swj2;
281
282 RealScalar x = dj + swj2/beta;
283 if (x<=RealScalar(0))
284 return j;
285 RealScalar nLjj = sqrt(x);
286 mat.coeffRef(j,j) = nLjj;
287 beta += swj2/dj;
288
289 // Update the terms of L
290 Index rs = n-j-1;
291 if(rs)
292 {
293 temp.tail(rs) -= (wj/Ljj) * mat.col(j).tail(rs);
294 if(gamma != 0)
295 mat.col(j).tail(rs) = (nLjj/Ljj) * mat.col(j).tail(rs) + (nLjj * sigma*numext::conj(wj)/gamma)*temp.tail(rs);
296 }
297 }
298 }
299 return -1;
300}
Block< Derived, internal::traits< Derived >::RowsAtCompileTime, 1, !IsRowMajor > ColXpr
Definition BlockMethods.h:14
Rotation given by a cosine-sine pair.
Definition Jacobi.h:35
void makeGivens(const Scalar &p, const Scalar &q, Scalar *r=0)
Definition Jacobi.h:148
T type
Definition Meta.h:78
void apply_rotation_in_the_plane(DenseBase< VectorX > &xpr_x, DenseBase< VectorY > &xpr_y, const JacobiRotation< OtherScalar > &j)
Definition Jacobi.h:432
TCoord< P > x(const P &p)
Definition geometry_traits.hpp:297

References apply_rotation_in_the_plane(), Eigen::PlainObjectBase< Derived >::cols(), eigen_assert, Eigen::JacobiRotation< Scalar >::makeGivens(), sqrt(), and y.

Referenced by Eigen::internal::llt_inplace< Scalar, Lower >::rankUpdate().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ LUnumTempV()

Index Eigen::internal::LUnumTempV ( Index m,
Index w,
Index t,
Index b 
)
inline
40{
41 return (std::max)(m, (t+b)*w);
42}

Referenced by Eigen::SparseLU< _MatrixType, _OrderingType >::factorize().

+ Here is the caller graph for this function:

◆ LUTempSpace()

template<typename Scalar >
Index Eigen::internal::LUTempSpace ( Index m,
Index w 
)
inline
46{
47 return (2*w + 4 + LUNoMarker) * m * sizeof(Index) + (w + 1) * m * sizeof(Scalar);
48}

References LUNoMarker.

◆ make_block_householder_triangular_factor()

template<typename TriangularFactorType , typename VectorsType , typename CoeffsType >
void Eigen::internal::make_block_householder_triangular_factor ( TriangularFactorType &  triFactor,
const VectorsType &  vectors,
const CoeffsType &  hCoeffs 
)
52{
53 const Index nbVecs = vectors.cols();
54 eigen_assert(triFactor.rows() == nbVecs && triFactor.cols() == nbVecs && vectors.rows()>=nbVecs);
55
56 for(Index i = nbVecs-1; i >=0 ; --i)
57 {
58 Index rs = vectors.rows() - i - 1;
59 Index rt = nbVecs-i-1;
60
61 if(rt>0)
62 {
63 triFactor.row(i).tail(rt).noalias() = -hCoeffs(i) * vectors.col(i).tail(rs).adjoint()
64 * vectors.bottomRightCorner(rs, rt).template triangularView<UnitLower>();
65
66 // FIXME add .noalias() once the triangular product can work inplace
67 triFactor.row(i).tail(rt) = triFactor.row(i).tail(rt) * triFactor.bottomRightCorner(rt,rt).template triangularView<Upper>();
68
69 }
70 triFactor(i,i) = hCoeffs(i);
71 }
72}

References eigen_assert.

Referenced by apply_block_householder_on_the_left().

+ Here is the caller graph for this function:

◆ manage_caching_sizes()

void Eigen::internal::manage_caching_sizes ( Action  action,
std::ptrdiff_t *  l1,
std::ptrdiff_t *  l2,
std::ptrdiff_t *  l3 
)
inline
56{
57 static CacheSizes m_cacheSizes;
58
59 if(action==SetAction)
60 {
61 // set the cpu cache size and cache all block sizes from a global cache size in byte
62 eigen_internal_assert(l1!=0 && l2!=0);
63 m_cacheSizes.m_l1 = *l1;
64 m_cacheSizes.m_l2 = *l2;
65 m_cacheSizes.m_l3 = *l3;
66 }
67 else if(action==GetAction)
68 {
69 eigen_internal_assert(l1!=0 && l2!=0);
70 *l1 = m_cacheSizes.m_l1;
71 *l2 = m_cacheSizes.m_l2;
72 *l3 = m_cacheSizes.m_l3;
73 }
74 else
75 {
77 }
78}
Definition GeneralBlockPanelKernel.h:39
std::ptrdiff_t m_l1
Definition GeneralBlockPanelKernel.h:48
std::ptrdiff_t m_l2
Definition GeneralBlockPanelKernel.h:49
std::ptrdiff_t m_l3
Definition GeneralBlockPanelKernel.h:50

References eigen_internal_assert, Eigen::GetAction, Eigen::internal::CacheSizes::m_l1, Eigen::internal::CacheSizes::m_l2, Eigen::internal::CacheSizes::m_l3, and Eigen::SetAction.

Referenced by evaluateProductBlockingSizesHeuristic(), Eigen::initParallel(), Eigen::l1CacheSize(), Eigen::l2CacheSize(), Eigen::l3CacheSize(), Eigen::internal::triangular_solve_matrix< Scalar, Index, OnTheLeft, Mode, Conjugate, TriStorageOrder, ColMajor >::run(), and Eigen::setCpuCacheSizes().

+ Here is the caller graph for this function:

◆ manage_caching_sizes_helper()

std::ptrdiff_t Eigen::internal::manage_caching_sizes_helper ( std::ptrdiff_t  a,
std::ptrdiff_t  b 
)
inline
24{
25 return a<=0 ? b : a;
26}

Referenced by Eigen::internal::CacheSizes::CacheSizes().

+ Here is the caller graph for this function:

◆ manage_multi_threading()

void Eigen::internal::manage_multi_threading ( Action  action,
int *  v 
)
inline
19{
20 static EIGEN_UNUSED int m_maxThreads = -1;
21
22 if(action==SetAction)
23 {
25 m_maxThreads = *v;
26 }
27 else if(action==GetAction)
28 {
30 #ifdef EIGEN_HAS_OPENMP
31 if(m_maxThreads>0)
32 *v = m_maxThreads;
33 else
34 *v = omp_get_max_threads();
35 #else
36 *v = 1;
37 #endif
38 }
39 else
40 {
42 }
43}
#define EIGEN_UNUSED
Definition Macros.h:609

References eigen_internal_assert, EIGEN_UNUSED, Eigen::GetAction, and Eigen::SetAction.

Referenced by Eigen::initParallel(), Eigen::nbThreads(), and Eigen::setNbThreads().

+ Here is the caller graph for this function:

◆ map_superlu()

template<typename Scalar , int Flags, typename Index >
MappedSparseMatrix< Scalar, Flags, Index > Eigen::internal::map_superlu ( SluMatrix sluMat)

View a SuperLU matrix as an Eigen expression.

299{
300 eigen_assert(((Flags&RowMajor)==RowMajor && sluMat.Stype == SLU_NR)
301 || ((Flags&ColMajor)==ColMajor && sluMat.Stype == SLU_NC));
302
303 Index outerSize = (Flags&RowMajor)==RowMajor ? sluMat.ncol : sluMat.nrow;
304
306 sluMat.nrow, sluMat.ncol, sluMat.storage.outerInd[outerSize],
307 sluMat.storage.outerInd, sluMat.storage.innerInd, reinterpret_cast<Scalar*>(sluMat.storage.values) );
308}
Sparse matrix.
Definition MappedSparseMatrix.h:34
@ RowMajor
Definition Constants.h:322
struct Eigen::SluMatrix::@636 storage

References Eigen::ColMajor, eigen_assert, Eigen::RowMajor, and Eigen::SluMatrix::storage.

◆ minimum_degree_ordering()

template<typename Scalar , typename StorageIndex >
void Eigen::internal::minimum_degree_ordering ( SparseMatrix< Scalar, ColMajor, StorageIndex > &  C,
PermutationMatrix< Dynamic, Dynamic, StorageIndex > &  perm 
)
95{
96 using std::sqrt;
97
98 StorageIndex d, dk, dext, lemax = 0, e, elenk, eln, i, j, k, k1,
99 k2, k3, jlast, ln, dense, nzmax, mindeg = 0, nvi, nvj, nvk, mark, wnvi,
100 ok, nel = 0, p, p1, p2, p3, p4, pj, pk, pk1, pk2, pn, q, t, h;
101
102 StorageIndex n = StorageIndex(C.cols());
103 dense = std::max<StorageIndex> (16, StorageIndex(10 * sqrt(double(n)))); /* find dense threshold */
104 dense = (std::min)(n-2, dense);
105
106 StorageIndex cnz = StorageIndex(C.nonZeros());
107 perm.resize(n+1);
108 t = cnz + cnz/5 + 2*n; /* add elbow room to C */
109 C.resizeNonZeros(t);
110
111 // get workspace
112 ei_declare_aligned_stack_constructed_variable(StorageIndex,W,8*(n+1),0);
113 StorageIndex* len = W;
114 StorageIndex* nv = W + (n+1);
115 StorageIndex* next = W + 2*(n+1);
116 StorageIndex* head = W + 3*(n+1);
117 StorageIndex* elen = W + 4*(n+1);
118 StorageIndex* degree = W + 5*(n+1);
119 StorageIndex* w = W + 6*(n+1);
120 StorageIndex* hhead = W + 7*(n+1);
121 StorageIndex* last = perm.indices().data(); /* use P as workspace for last */
122
123 /* --- Initialize quotient graph ---------------------------------------- */
124 StorageIndex* Cp = C.outerIndexPtr();
125 StorageIndex* Ci = C.innerIndexPtr();
126 for(k = 0; k < n; k++)
127 len[k] = Cp[k+1] - Cp[k];
128 len[n] = 0;
129 nzmax = t;
130
131 for(i = 0; i <= n; i++)
132 {
133 head[i] = -1; // degree list i is empty
134 last[i] = -1;
135 next[i] = -1;
136 hhead[i] = -1; // hash list i is empty
137 nv[i] = 1; // node i is just one node
138 w[i] = 1; // node i is alive
139 elen[i] = 0; // Ek of node i is empty
140 degree[i] = len[i]; // degree of node i
141 }
142 mark = internal::cs_wclear<StorageIndex>(0, 0, w, n); /* clear w */
143
144 /* --- Initialize degree lists ------------------------------------------ */
145 for(i = 0; i < n; i++)
146 {
147 bool has_diag = false;
148 for(p = Cp[i]; p<Cp[i+1]; ++p)
149 if(Ci[p]==i)
150 {
151 has_diag = true;
152 break;
153 }
154
155 d = degree[i];
156 if(d == 1 && has_diag) /* node i is empty */
157 {
158 elen[i] = -2; /* element i is dead */
159 nel++;
160 Cp[i] = -1; /* i is a root of assembly tree */
161 w[i] = 0;
162 }
163 else if(d > dense || !has_diag) /* node i is dense or has no structural diagonal element */
164 {
165 nv[i] = 0; /* absorb i into element n */
166 elen[i] = -1; /* node i is dead */
167 nel++;
168 Cp[i] = amd_flip (n);
169 nv[n]++;
170 }
171 else
172 {
173 if(head[d] != -1) last[head[d]] = i;
174 next[i] = head[d]; /* put node i in degree list d */
175 head[d] = i;
176 }
177 }
178
179 elen[n] = -2; /* n is a dead element */
180 Cp[n] = -1; /* n is a root of assembly tree */
181 w[n] = 0; /* n is a dead element */
182
183 while (nel < n) /* while (selecting pivots) do */
184 {
185 /* --- Select node of minimum approximate degree -------------------- */
186 for(k = -1; mindeg < n && (k = head[mindeg]) == -1; mindeg++) {}
187 if(next[k] != -1) last[next[k]] = -1;
188 head[mindeg] = next[k]; /* remove k from degree list */
189 elenk = elen[k]; /* elenk = |Ek| */
190 nvk = nv[k]; /* # of nodes k represents */
191 nel += nvk; /* nv[k] nodes of A eliminated */
192
193 /* --- Garbage collection ------------------------------------------- */
194 if(elenk > 0 && cnz + mindeg >= nzmax)
195 {
196 for(j = 0; j < n; j++)
197 {
198 if((p = Cp[j]) >= 0) /* j is a live node or element */
199 {
200 Cp[j] = Ci[p]; /* save first entry of object */
201 Ci[p] = amd_flip (j); /* first entry is now amd_flip(j) */
202 }
203 }
204 for(q = 0, p = 0; p < cnz; ) /* scan all of memory */
205 {
206 if((j = amd_flip (Ci[p++])) >= 0) /* found object j */
207 {
208 Ci[q] = Cp[j]; /* restore first entry of object */
209 Cp[j] = q++; /* new pointer to object j */
210 for(k3 = 0; k3 < len[j]-1; k3++) Ci[q++] = Ci[p++];
211 }
212 }
213 cnz = q; /* Ci[cnz...nzmax-1] now free */
214 }
215
216 /* --- Construct new element ---------------------------------------- */
217 dk = 0;
218 nv[k] = -nvk; /* flag k as in Lk */
219 p = Cp[k];
220 pk1 = (elenk == 0) ? p : cnz; /* do in place if elen[k] == 0 */
221 pk2 = pk1;
222 for(k1 = 1; k1 <= elenk + 1; k1++)
223 {
224 if(k1 > elenk)
225 {
226 e = k; /* search the nodes in k */
227 pj = p; /* list of nodes starts at Ci[pj]*/
228 ln = len[k] - elenk; /* length of list of nodes in k */
229 }
230 else
231 {
232 e = Ci[p++]; /* search the nodes in e */
233 pj = Cp[e];
234 ln = len[e]; /* length of list of nodes in e */
235 }
236 for(k2 = 1; k2 <= ln; k2++)
237 {
238 i = Ci[pj++];
239 if((nvi = nv[i]) <= 0) continue; /* node i dead, or seen */
240 dk += nvi; /* degree[Lk] += size of node i */
241 nv[i] = -nvi; /* negate nv[i] to denote i in Lk*/
242 Ci[pk2++] = i; /* place i in Lk */
243 if(next[i] != -1) last[next[i]] = last[i];
244 if(last[i] != -1) /* remove i from degree list */
245 {
246 next[last[i]] = next[i];
247 }
248 else
249 {
250 head[degree[i]] = next[i];
251 }
252 }
253 if(e != k)
254 {
255 Cp[e] = amd_flip (k); /* absorb e into k */
256 w[e] = 0; /* e is now a dead element */
257 }
258 }
259 if(elenk != 0) cnz = pk2; /* Ci[cnz...nzmax] is free */
260 degree[k] = dk; /* external degree of k - |Lk\i| */
261 Cp[k] = pk1; /* element k is in Ci[pk1..pk2-1] */
262 len[k] = pk2 - pk1;
263 elen[k] = -2; /* k is now an element */
264
265 /* --- Find set differences ----------------------------------------- */
266 mark = internal::cs_wclear<StorageIndex>(mark, lemax, w, n); /* clear w if necessary */
267 for(pk = pk1; pk < pk2; pk++) /* scan 1: find |Le\Lk| */
268 {
269 i = Ci[pk];
270 if((eln = elen[i]) <= 0) continue;/* skip if elen[i] empty */
271 nvi = -nv[i]; /* nv[i] was negated */
272 wnvi = mark - nvi;
273 for(p = Cp[i]; p <= Cp[i] + eln - 1; p++) /* scan Ei */
274 {
275 e = Ci[p];
276 if(w[e] >= mark)
277 {
278 w[e] -= nvi; /* decrement |Le\Lk| */
279 }
280 else if(w[e] != 0) /* ensure e is a live element */
281 {
282 w[e] = degree[e] + wnvi; /* 1st time e seen in scan 1 */
283 }
284 }
285 }
286
287 /* --- Degree update ------------------------------------------------ */
288 for(pk = pk1; pk < pk2; pk++) /* scan2: degree update */
289 {
290 i = Ci[pk]; /* consider node i in Lk */
291 p1 = Cp[i];
292 p2 = p1 + elen[i] - 1;
293 pn = p1;
294 for(h = 0, d = 0, p = p1; p <= p2; p++) /* scan Ei */
295 {
296 e = Ci[p];
297 if(w[e] != 0) /* e is an unabsorbed element */
298 {
299 dext = w[e] - mark; /* dext = |Le\Lk| */
300 if(dext > 0)
301 {
302 d += dext; /* sum up the set differences */
303 Ci[pn++] = e; /* keep e in Ei */
304 h += e; /* compute the hash of node i */
305 }
306 else
307 {
308 Cp[e] = amd_flip (k); /* aggressive absorb. e->k */
309 w[e] = 0; /* e is a dead element */
310 }
311 }
312 }
313 elen[i] = pn - p1 + 1; /* elen[i] = |Ei| */
314 p3 = pn;
315 p4 = p1 + len[i];
316 for(p = p2 + 1; p < p4; p++) /* prune edges in Ai */
317 {
318 j = Ci[p];
319 if((nvj = nv[j]) <= 0) continue; /* node j dead or in Lk */
320 d += nvj; /* degree(i) += |j| */
321 Ci[pn++] = j; /* place j in node list of i */
322 h += j; /* compute hash for node i */
323 }
324 if(d == 0) /* check for mass elimination */
325 {
326 Cp[i] = amd_flip (k); /* absorb i into k */
327 nvi = -nv[i];
328 dk -= nvi; /* |Lk| -= |i| */
329 nvk += nvi; /* |k| += nv[i] */
330 nel += nvi;
331 nv[i] = 0;
332 elen[i] = -1; /* node i is dead */
333 }
334 else
335 {
336 degree[i] = std::min<StorageIndex> (degree[i], d); /* update degree(i) */
337 Ci[pn] = Ci[p3]; /* move first node to end */
338 Ci[p3] = Ci[p1]; /* move 1st el. to end of Ei */
339 Ci[p1] = k; /* add k as 1st element in of Ei */
340 len[i] = pn - p1 + 1; /* new len of adj. list of node i */
341 h %= n; /* finalize hash of i */
342 next[i] = hhead[h]; /* place i in hash bucket */
343 hhead[h] = i;
344 last[i] = h; /* save hash of i in last[i] */
345 }
346 } /* scan2 is done */
347 degree[k] = dk; /* finalize |Lk| */
348 lemax = std::max<StorageIndex>(lemax, dk);
349 mark = internal::cs_wclear<StorageIndex>(mark+lemax, lemax, w, n); /* clear w */
350
351 /* --- Supernode detection ------------------------------------------ */
352 for(pk = pk1; pk < pk2; pk++)
353 {
354 i = Ci[pk];
355 if(nv[i] >= 0) continue; /* skip if i is dead */
356 h = last[i]; /* scan hash bucket of node i */
357 i = hhead[h];
358 hhead[h] = -1; /* hash bucket will be empty */
359 for(; i != -1 && next[i] != -1; i = next[i], mark++)
360 {
361 ln = len[i];
362 eln = elen[i];
363 for(p = Cp[i]+1; p <= Cp[i] + ln-1; p++) w[Ci[p]] = mark;
364 jlast = i;
365 for(j = next[i]; j != -1; ) /* compare i with all j */
366 {
367 ok = (len[j] == ln) && (elen[j] == eln);
368 for(p = Cp[j] + 1; ok && p <= Cp[j] + ln - 1; p++)
369 {
370 if(w[Ci[p]] != mark) ok = 0; /* compare i and j*/
371 }
372 if(ok) /* i and j are identical */
373 {
374 Cp[j] = amd_flip (i); /* absorb j into i */
375 nv[i] += nv[j];
376 nv[j] = 0;
377 elen[j] = -1; /* node j is dead */
378 j = next[j]; /* delete j from hash bucket */
379 next[jlast] = j;
380 }
381 else
382 {
383 jlast = j; /* j and i are different */
384 j = next[j];
385 }
386 }
387 }
388 }
389
390 /* --- Finalize new element------------------------------------------ */
391 for(p = pk1, pk = pk1; pk < pk2; pk++) /* finalize Lk */
392 {
393 i = Ci[pk];
394 if((nvi = -nv[i]) <= 0) continue;/* skip if i is dead */
395 nv[i] = nvi; /* restore nv[i] */
396 d = degree[i] + dk - nvi; /* compute external degree(i) */
397 d = std::min<StorageIndex> (d, n - nel - nvi);
398 if(head[d] != -1) last[head[d]] = i;
399 next[i] = head[d]; /* put i back in degree list */
400 last[i] = -1;
401 head[d] = i;
402 mindeg = std::min<StorageIndex> (mindeg, d); /* find new minimum degree */
403 degree[i] = d;
404 Ci[p++] = i; /* place i in Lk */
405 }
406 nv[k] = nvk; /* # nodes absorbed into k */
407 if((len[k] = p-pk1) == 0) /* length of adj list of element k*/
408 {
409 Cp[k] = -1; /* k is a root of the tree */
410 w[k] = 0; /* k is now a dead element */
411 }
412 if(elenk != 0) cnz = p; /* free unused space in Lk */
413 }
414
415 /* --- Postordering ----------------------------------------------------- */
416 for(i = 0; i < n; i++) Cp[i] = amd_flip (Cp[i]);/* fix assembly tree */
417 for(j = 0; j <= n; j++) head[j] = -1;
418 for(j = n; j >= 0; j--) /* place unordered nodes in lists */
419 {
420 if(nv[j] > 0) continue; /* skip if j is an element */
421 next[j] = head[Cp[j]]; /* place j in list of its parent */
422 head[Cp[j]] = j;
423 }
424 for(e = n; e >= 0; e--) /* place elements in lists */
425 {
426 if(nv[e] <= 0) continue; /* skip unless e is an element */
427 if(Cp[e] != -1)
428 {
429 next[e] = head[Cp[e]]; /* place e in list of its parent */
430 head[Cp[e]] = e;
431 }
432 }
433 for(k = 0, i = 0; i <= n; i++) /* postorder the assembly tree */
434 {
435 if(Cp[i] == -1) k = internal::cs_tdfs<StorageIndex>(i, k, head, next, perm.indices().data(), w);
436 }
437
438 perm.indices().conservativeResize(n);
439}
void resize(Index newSize)
Definition PermutationMatrix.h:136
const IndicesType & indices() const
Definition PermutationMatrix.h:388
Index nonZeros() const
Definition SparseCompressedBase.h:56
void resizeNonZeros(Index size)
Definition SparseMatrix.h:644
const StorageIndex * innerIndexPtr() const
Definition SparseMatrix.h:157
const StorageIndex * outerIndexPtr() const
Definition SparseMatrix.h:166
Index cols() const
Definition SparseMatrix.h:138

References amd_flip(), Eigen::SparseMatrix< _Scalar, _Options, _StorageIndex >::cols(), ei_declare_aligned_stack_constructed_variable, head(), Eigen::PermutationMatrix< SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndex >::indices(), Eigen::SparseMatrix< _Scalar, _Options, _StorageIndex >::innerIndexPtr(), Eigen::SparseMatrix< _Scalar, _Options, _StorageIndex >::nonZeros(), Eigen::SparseMatrix< _Scalar, _Options, _StorageIndex >::outerIndexPtr(), Eigen::PermutationBase< Derived >::resize(), Eigen::SparseMatrix< _Scalar, _Options, _StorageIndex >::resizeNonZeros(), and sqrt().

Referenced by Eigen::AMDOrdering< StorageIndex >::operator()(), and Eigen::AMDOrdering< StorageIndex >::operator()().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ nr_etdfs()

template<typename IndexVector >
void Eigen::internal::nr_etdfs ( typename IndexVector::Scalar  n,
IndexVector &  parent,
IndexVector &  first_kid,
IndexVector &  next_kid,
IndexVector &  post,
typename IndexVector::Scalar  postnum 
)

Depth-first search from vertex n. No recursion. This routine was contributed by Cédric Doucet, CEDRAT Group, Meylan, France.

131{
132 typedef typename IndexVector::Scalar StorageIndex;
133 StorageIndex current = n, first, next;
134 while (postnum != n)
135 {
136 // No kid for the current node
137 first = first_kid(current);
138
139 // no kid for the current node
140 if (first == -1)
141 {
142 // Numbering this node because it has no kid
143 post(current) = postnum++;
144
145 // looking for the next kid
146 next = next_kid(current);
147 while (next == -1)
148 {
149 // No more kids : back to the parent node
150 current = parent(current);
151 // numbering the parent node
152 post(current) = postnum++;
153
154 // Get the next kid
155 next = next_kid(current);
156 }
157 // stopping criterion
158 if (postnum == n+1) return;
159
160 // Updating current node
161 current = next;
162 }
163 else
164 {
165 current = first;
166 }
167 }
168}

Referenced by treePostorder().

+ Here is the caller graph for this function:

◆ operator<<() [1/7]

std::ostream & Eigen::internal::operator<< ( std::ostream &  s,
const Packet16uc v 
)
inline
194{
195 union {
196 Packet16uc v;
197 unsigned char n[16];
198 } vt;
199 vt.v = v;
200 for (int i=0; i< 16; i++)
201 s << (int)vt.n[i] << ", ";
202 return s;
203}
__vector unsigned char Packet16uc
Definition PacketMath.h:39

◆ operator<<() [2/7]

std::ostream & Eigen::internal::operator<< ( std::ostream &  s,
const Packet2d v 
)
inline
254{
255 Packet vt;
256 vt.v2d = v;
257 s << vt.d[0] << ", " << vt.d[1];
258 return s;
259}
double d[2]
Definition PacketMath.h:53
Packet2d v2d
Definition PacketMath.h:58
Definition PacketMath.h:48

References Eigen::internal::Packet::d, and Eigen::internal::Packet::v2d.

◆ operator<<() [3/7]

std::ostream & Eigen::internal::operator<< ( std::ostream &  s,
const Packet2l v 
)
inline
238{
239 Packet vt;
240 vt.v2l = v;
241 s << vt.l[0] << ", " << vt.l[1];
242 return s;
243}
int64_t l[2]
Definition PacketMath.h:51
Packet2l v2l
Definition PacketMath.h:56

References Eigen::internal::Packet::l, and Eigen::internal::Packet::v2l.

◆ operator<<() [4/7]

std::ostream & Eigen::internal::operator<< ( std::ostream &  s,
const Packet2ul v 
)
inline
246{
247 Packet vt;
248 vt.v2ul = v;
249 s << vt.ul[0] << ", " << vt.ul[1] ;
250 return s;
251}
Packet2ul v2ul
Definition PacketMath.h:57
uint64_t ul[2]
Definition PacketMath.h:52

References Eigen::internal::Packet::ul, and Eigen::internal::Packet::v2ul.

◆ operator<<() [5/7]

std::ostream & Eigen::internal::operator<< ( std::ostream &  s,
const Packet4f v 
)
inline
206{
207 union {
208 Packet4f v;
209 float n[4];
210 } vt;
211 vt.v = v;
212 s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3];
213 return s;
214}
Definition PacketMath.h:44

◆ operator<<() [6/7]

std::ostream & Eigen::internal::operator<< ( std::ostream &  s,
const Packet4i v 
)
inline
217{
218 union {
219 Packet4i v;
220 int n[4];
221 } vt;
222 vt.v = v;
223 s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3];
224 return s;
225}
__vector int Packet4i
Definition PacketMath.h:35

◆ operator<<() [7/7]

std::ostream & Eigen::internal::operator<< ( std::ostream &  s,
const Packet4ui v 
)
inline
228{
229 union {
230 Packet4ui v;
231 unsigned int n[4];
232 } vt;
233 vt.v = v;
234 s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3];
235 return s;
236}
__vector unsigned int Packet4ui
Definition PacketMath.h:36

◆ ordering_helper_at_plus_a()

template<typename MatrixType >
void Eigen::internal::ordering_helper_at_plus_a ( const MatrixType &  A,
MatrixType &  symmat 
)
28{
29 MatrixType C;
30 C = A.transpose(); // NOTE: Could be costly
31 for (int i = 0; i < C.rows(); i++)
32 {
33 for (typename MatrixType::InnerIterator it(C, i); it; ++it)
34 it.valueRef() = 0.0;
35 }
36 symmat = C + A;
37}

Referenced by Eigen::AMDOrdering< StorageIndex >::operator()().

+ Here is the caller graph for this function:

◆ outer_product_selector_run() [1/2]

template<typename Dst , typename Lhs , typename Rhs , typename Func >
void Eigen::internal::outer_product_selector_run ( Dst dst,
const Lhs &  lhs,
const Rhs &  rhs,
const Func &  func,
const false_type  
)
273{
274 evaluator<Rhs> rhsEval(rhs);
275 typename nested_eval<Lhs,Rhs::SizeAtCompileTime>::type actual_lhs(lhs);
276 // FIXME if cols is large enough, then it might be useful to make sure that lhs is sequentially stored
277 // FIXME not very good if rhs is real and lhs complex while alpha is real too
278 const Index cols = dst.cols();
279 for (Index j=0; j<cols; ++j)
280 func(dst.col(j), rhsEval.coeff(Index(0),j) * actual_lhs);
281}
Definition Meta.h:54

Referenced by Eigen::internal::generic_product_impl< Lhs, Rhs, DenseShape, DenseShape, OuterProduct >::addTo(), Eigen::internal::generic_product_impl< Lhs, Rhs, DenseShape, DenseShape, OuterProduct >::evalTo(), Eigen::internal::generic_product_impl< Lhs, Rhs, DenseShape, DenseShape, OuterProduct >::scaleAndAddTo(), and Eigen::internal::generic_product_impl< Lhs, Rhs, DenseShape, DenseShape, OuterProduct >::subTo().

+ Here is the caller graph for this function:

◆ outer_product_selector_run() [2/2]

template<typename Dst , typename Lhs , typename Rhs , typename Func >
void Eigen::internal::outer_product_selector_run ( Dst dst,
const Lhs &  lhs,
const Rhs &  rhs,
const Func &  func,
const true_type  
)
286{
287 evaluator<Lhs> lhsEval(lhs);
288 typename nested_eval<Rhs,Lhs::SizeAtCompileTime>::type actual_rhs(rhs);
289 // FIXME if rows is large enough, then it might be useful to make sure that rhs is sequentially stored
290 // FIXME not very good if lhs is real and rhs complex while alpha is real too
291 const Index rows = dst.rows();
292 for (Index i=0; i<rows; ++i)
293 func(dst.row(i), lhsEval.coeff(i,Index(0)) * actual_rhs);
294}

◆ p2ui_CONJ_XOR()

uint32x2_t Eigen::internal::p2ui_CONJ_XOR ( )
inline
29 {
30 static const uint32_t conj_XOR_DATA[] = { 0x00000000, 0x80000000 };
31 return vld1_u32( conj_XOR_DATA );
32}
unsigned __int32 uint32_t
Definition unistd.h:79

Referenced by predux_mul< Packet2cf >().

+ Here is the caller graph for this function:

◆ pabs() [1/12]

template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pabs ( const Packet a)
inline
190{ using std::abs; return abs(a); }

◆ pabs() [2/12]

template<>
EIGEN_STRONG_INLINE Packet16f Eigen::internal::pabs ( const Packet16f a)
649{
650 // _mm512_abs_ps intrinsic not found, so hack around it
651 return _mm512_castsi512_ps(_mm512_and_si512(_mm512_castps_si512(a), _mm512_set1_epi32(0x7fffffff)));
652}

◆ pabs() [3/12]

template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::pabs ( const Packet2d a)
454{
455 const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
456 return _mm_and_pd(a,mask);
457}
__m128d Packet2d
Definition PacketMath.h:57

◆ pabs() [4/12]

template<>
EIGEN_STRONG_INLINE Packet4d Eigen::internal::pabs ( const Packet4d a)
352{
353 const Packet4d mask = _mm256_castsi256_pd(_mm256_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
354 return _mm256_and_pd(a,mask);
355}
__m256d Packet4d
Definition PacketMath.h:33

◆ pabs() [5/12]

template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pabs ( const Packet4f a)
548{ return vec_abs(a); }

Referenced by pabs< Packet4f >(), Eigen::internal::scalar_abs_op< Scalar >::packetOp(), pcos< Packet4f >(), and psin< Packet4f >().

+ Here is the caller graph for this function:

◆ pabs() [6/12]

template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pabs ( const Packet4f a)
364{ return vabsq_f32(a); }

◆ pabs() [7/12]

template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pabs ( const Packet4f a)
449{
450 const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
451 return _mm_and_ps(a,mask);
452}

◆ pabs() [8/12]

template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pabs ( const Packet4i a)
549{ return vec_abs(a); }

◆ pabs() [9/12]

template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pabs ( const Packet4i a)
365{ return vabsq_s32(a); }

◆ pabs() [10/12]

template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pabs ( const Packet4i a)
459{
460 #ifdef EIGEN_VECTORIZE_SSSE3
461 return _mm_abs_epi32(a);
462 #else
463 Packet4i aux = _mm_srai_epi32(a,31);
464 return _mm_sub_epi32(_mm_xor_si128(a,aux),aux);
465 #endif
466}

◆ pabs() [11/12]

template<>
EIGEN_STRONG_INLINE Packet8d Eigen::internal::pabs ( const Packet8d a)
654 {
655 // _mm512_abs_ps intrinsic not found, so hack around it
656 return _mm512_castsi512_pd(_mm512_and_si512(_mm512_castpd_si512(a),
657 _mm512_set1_epi64(0x7fffffffffffffff)));
658}

◆ pabs() [12/12]

template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::pabs ( const Packet8f a)
347{
348 const Packet8f mask = _mm256_castsi256_ps(_mm256_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
349 return _mm256_and_ps(a,mask);
350}
__m256 Packet8f
Definition PacketMath.h:31

◆ pabs< Packet2d >()

707{ return vec_abs(a); }

◆ pabs< Packet4f >()

709{
710 Packet4f res;
711 res.v4f[0] = pabs(a.v4f[0]);
712 res.v4f[1] = pabs(a.v4f[1]);
713 return res;
714}
Packet2d v4f[2]
Definition PacketMath.h:45
EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f &a)
Definition PacketMath.h:548

References pabs(), and Eigen::internal::Packet4f::v4f.

+ Here is the call graph for this function:

◆ pabs< Packet4i >()

706{ return vec_abs(a); }

◆ pacos()

template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::pacos ( const Packet a)
382{ using std::acos; return acos(a); }
EIGEN_DEVICE_FUNC const AcosReturnType acos() const
Definition ArrayCwiseUnaryOps.h:262

References acos().

+ Here is the call graph for this function:

◆ padd() [1/2]

template<typename Packet >
DoublePacket< Packet > Eigen::internal::padd ( const DoublePacket< Packet > &  a,
const DoublePacket< Packet > &  b 
)
575{
577 res.first = padd(a.first, b.first);
578 res.second = padd(a.second,b.second);
579 return res;
580}
EIGEN_DEVICE_FUNC Packet padd(const Packet &a, const Packet &b)
Definition GenericPacketMath.h:151
Packet second
Definition GeneralBlockPanelKernel.h:570
Packet first
Definition GeneralBlockPanelKernel.h:569
Definition GeneralBlockPanelKernel.h:568

References Eigen::internal::DoublePacket< Packet >::first, padd(), and Eigen::internal::DoublePacket< Packet >::second.

+ Here is the call graph for this function:

◆ padd() [2/2]

template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::padd ( const Packet a,
const Packet b 
)
inline
152 { return a+b; }

Referenced by Eigen::internal::gebp_traits< std::complex< RealScalar >, std::complex< RealScalar >, _ConjLhs, _ConjRhs >::acc(), Eigen::internal::add_assign_op< DstScalar, SrcScalar >::assignPacket(), Eigen::internal::gebp_traits< std::complex< RealScalar >, std::complex< RealScalar >, _ConjLhs, _ConjRhs >::madd(), Eigen::internal::gebp_traits< _LhsScalar, _RhsScalar, _ConjLhs, _ConjRhs >::madd(), Eigen::internal::gebp_traits< std::complex< RealScalar >, RealScalar, _ConjLhs, false >::madd_impl(), Eigen::internal::gebp_traits< RealScalar, std::complex< RealScalar >, false, _ConjRhs >::madd_impl(), Eigen::internal::gebp_kernel< LhsScalar, RhsScalar, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs >::operator()(), Eigen::internal::scalar_sum_op< LhsScalar, RhsScalar >::packetOp(), Eigen::internal::linspaced_op_impl< Scalar, Packet, false >::packetOp(), padd(), pcos< Packet4f >(), pexp< Packet4f >(), pexp< Packet8f >(), plog< Packet4f >(), plog< Packet8f >(), Eigen::internal::conj_helper< LhsScalar, RhsScalar, ConjLhs, ConjRhs >::pmadd(), pmadd(), Eigen::internal::conj_helper< Packet1cd, Packet1cd, false, true >::pmadd(), Eigen::internal::conj_helper< Packet1cd, Packet1cd, true, false >::pmadd(), Eigen::internal::conj_helper< Packet1cd, Packet1cd, true, true >::pmadd(), Eigen::internal::conj_helper< Packet2cd, Packet2cd, false, true >::pmadd(), Eigen::internal::conj_helper< Packet2cd, Packet2cd, true, false >::pmadd(), Eigen::internal::conj_helper< Packet2cd, Packet2cd, true, true >::pmadd(), Eigen::internal::conj_helper< Packet2cf, Packet2cf, false, true >::pmadd(), Eigen::internal::conj_helper< Packet2cf, Packet2cf, true, false >::pmadd(), Eigen::internal::conj_helper< Packet2cf, Packet2cf, true, true >::pmadd(), Eigen::internal::conj_helper< Packet4cf, Packet4cf, false, true >::pmadd(), Eigen::internal::conj_helper< Packet4cf, Packet4cf, true, false >::pmadd(), Eigen::internal::conj_helper< Packet4cf, Packet4cf, true, true >::pmadd(), pmadd(), 
Eigen::internal::conj_helper< RealScalar, std::complex< RealScalar >, false, Conj >::pmadd(), Eigen::internal::conj_helper< std::complex< RealScalar >, RealScalar, Conj, false >::pmadd(), predux< Packet16f >(), predux< Packet2cd >(), predux< Packet4cf >(), predux< Packet8d >(), predux_downto4< Packet16f >(), predux_downto4< Packet8d >(), preduxp< Packet16f >(), preduxp< Packet4f >(), preduxp< Packet8d >(), psin< Packet4f >(), psin< Packet8f >(), Eigen::internal::gebp_madd_selector< CJ, T, T, T, T >::run(), Eigen::internal::quat_product< Architecture::SSE, Derived, OtherDerived, double >::run(), and Eigen::internal::apply_rotation_in_the_plane_selector< Scalar, OtherScalar, SizeAtCompileTime, MinAlignment, true >::run().

+ Here is the caller graph for this function:

◆ padd< Packet16f >()

153 {
154 return _mm512_add_ps(a, b);
155}

◆ padd< Packet1cd >() [1/2]

285{ return Packet1cd(_mm_add_pd(a.v,b.v)); }
Definition Complex.h:250

Referenced by predux< Packet2cf >().

+ Here is the caller graph for this function:

◆ padd< Packet1cd >() [2/2]

135{ return Packet1cd(a.v + b.v); }

◆ padd< Packet2cd >()

256{ return Packet2cd(_mm256_add_pd(a.v,b.v)); }
Definition Complex.h:225

◆ padd< Packet2cf >() [1/4]

98{ return Packet2cf(a.v + b.v); }
Definition Complex.h:31

Referenced by preduxp< Packet2cf >().

+ Here is the caller graph for this function:

◆ padd< Packet2cf >() [2/4]

75{ return Packet2cf(padd<Packet4f>(a.v,b.v)); }

References padd< Packet4f >().

+ Here is the call graph for this function:

◆ padd< Packet2cf >() [3/4]

55{ return Packet2cf(_mm_add_ps(a.v,b.v)); }

◆ padd< Packet2cf >() [4/4]

134{ return Packet2cf(padd<Packet4f>(a.v, b.v)); }

References padd< Packet4f >().

+ Here is the call graph for this function:

◆ padd< Packet2d >() [1/2]

199{ return _mm_add_pd(a,b); }

Referenced by plset< Packet2d >(), predux< Packet2d >(), predux< Packet4f >(), and preduxp< Packet2d >().

+ Here is the caller graph for this function:

◆ padd< Packet2d >() [2/2]

506{ return (a + b); }

◆ padd< Packet4cf >()

50{ return Packet4cf(_mm256_add_ps(a.v,b.v)); }
Definition Complex.h:19

◆ padd< Packet4d >()

131{ return _mm256_add_pd(a,b); }

◆ padd< Packet4f >() [1/4]

349{ return a + b; }

Referenced by padd< Packet2cf >(), pdiv< Packet2cf >(), plset< Packet4f >(), pmul< Packet2cf >(), predux< Packet2cf >(), and preduxp< Packet2cf >().

+ Here is the caller graph for this function:

◆ padd< Packet4f >() [2/4]

161{ return vaddq_f32(a,b); }

◆ padd< Packet4f >() [3/4]

198{ return _mm_add_ps(a,b); }

◆ padd< Packet4f >() [4/4]

500{
501 Packet4f c;
502 c.v4f[0] = a.v4f[0] + b.v4f[0];
503 c.v4f[1] = a.v4f[1] + b.v4f[1];
504 return c;
505}

◆ padd< Packet4i >() [1/4]

350{ return a + b; }

Referenced by plset< Packet4i >(), pmadd(), predux< Packet4i >(), and preduxp< Packet4i >().

+ Here is the caller graph for this function:

◆ padd< Packet4i >() [2/4]

162{ return vaddq_s32(a,b); }

◆ padd< Packet4i >() [3/4]

200{ return _mm_add_epi32(a,b); }

◆ padd< Packet4i >() [4/4]

498{ return (a + b); }

◆ padd< Packet8d >()

158 {
159 return _mm512_add_pd(a, b);
160}

◆ padd< Packet8f >()

130{ return _mm256_add_ps(a,b); }

◆ palign()

template<int Offset, typename PacketType >
void Eigen::internal::palign ( PacketType &  first,
const PacketType &  second 
)
inline
515{
517}
Definition GenericPacketMath.h:493

References Eigen::internal::palign_impl< Offset, PacketType >::run().

+ Here is the call graph for this function:

◆ pand()

template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pand ( const Packet a,
const Packet b 
)
inline
198{ return a & b; }

Referenced by pand< Packet4f >(), plog< Packet4f >(), por< Packet4f >(), and pxor< Packet4f >().

+ Here is the caller graph for this function:

◆ pand< Packet16f >()

254 {
255#ifdef EIGEN_VECTORIZE_AVX512DQ
256 return _mm512_and_ps(a, b);
257#else
258 Packet16f res = _mm512_undefined_ps();
259 Packet4f lane0_a = _mm512_extractf32x4_ps(a, 0);
260 Packet4f lane0_b = _mm512_extractf32x4_ps(b, 0);
261 res = _mm512_insertf32x4(res, _mm_and_ps(lane0_a, lane0_b), 0);
262
263 Packet4f lane1_a = _mm512_extractf32x4_ps(a, 1);
264 Packet4f lane1_b = _mm512_extractf32x4_ps(b, 1);
265 res = _mm512_insertf32x4(res, _mm_and_ps(lane1_a, lane1_b), 1);
266
267 Packet4f lane2_a = _mm512_extractf32x4_ps(a, 2);
268 Packet4f lane2_b = _mm512_extractf32x4_ps(b, 2);
269 res = _mm512_insertf32x4(res, _mm_and_ps(lane2_a, lane2_b), 2);
270
271 Packet4f lane3_a = _mm512_extractf32x4_ps(a, 3);
272 Packet4f lane3_b = _mm512_extractf32x4_ps(b, 3);
273 res = _mm512_insertf32x4(res, _mm_and_ps(lane3_a, lane3_b), 3);
274
275 return res;
276#endif
277}
__m512 Packet16f
Definition PacketMath.h:31

◆ pand< Packet1cd >() [1/2]

308{ return Packet1cd(_mm_and_pd(a.v,b.v)); }

◆ pand< Packet1cd >() [2/2]

174{ return Packet1cd(vec_and(a.v,b.v)); }

◆ pand< Packet2cd >()

275{ return Packet2cd(_mm256_and_pd(a.v,b.v)); }

◆ pand< Packet2cf >() [1/4]

122{ return Packet2cf(pand<Packet4f>(a.v, b.v)); }

References pand< Packet4f >().

+ Here is the call graph for this function:

◆ pand< Packet2cf >() [2/4]

105{
106 return Packet2cf(vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v))));
107}

◆ pand< Packet2cf >() [3/4]

85{ return Packet2cf(_mm_and_ps(a.v,b.v)); }

◆ pand< Packet2cf >() [4/4]

175{ return Packet2cf(pand<Packet4f>(a.v,b.v)); }

References pand< Packet4f >().

+ Here is the call graph for this function:

◆ pand< Packet2d >() [1/2]

291{ return _mm_and_pd(a,b); }

◆ pand< Packet2d >() [2/2]

587{ return vec_and(a, b); }

◆ pand< Packet4cf >()

70{ return Packet4cf(_mm256_and_ps(a.v,b.v)); }

◆ pand< Packet4d >()

203{ return _mm256_and_pd(a,b); }

◆ pand< Packet4f >() [1/4]

415{ return vec_and(a, b); }

Referenced by pand< Packet2cf >().

+ Here is the caller graph for this function:

◆ pand< Packet4f >() [2/4]

254{
255 return vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
256}

◆ pand< Packet4f >() [3/4]

290{ return _mm_and_ps(a,b); }

◆ pand< Packet4f >() [4/4]

589{
590 Packet4f res;
591 res.v4f[0] = pand(a.v4f[0], b.v4f[0]);
592 res.v4f[1] = pand(a.v4f[1], b.v4f[1]);
593 return res;
594}
EIGEN_DEVICE_FUNC Packet pand(const Packet &a, const Packet &b)
Definition GenericPacketMath.h:198

References pand(), and Eigen::internal::Packet4f::v4f.

+ Here is the call graph for this function:

◆ pand< Packet4i >() [1/4]

416{ return vec_and(a, b); }

Referenced by pandnot< Packet4i >().

+ Here is the caller graph for this function:

◆ pand< Packet4i >() [2/4]

257{ return vandq_s32(a,b); }

◆ pand< Packet4i >() [3/4]

292{ return _mm_and_si128(a,b); }

◆ pand< Packet4i >() [4/4]

586{ return vec_and(a, b); }

◆ pand< Packet8d >()

280 {
281#ifdef EIGEN_VECTORIZE_AVX512DQ
282 return _mm512_and_pd(a, b);
283#else
284 Packet8d res = _mm512_undefined_pd();
285 Packet4d lane0_a = _mm512_extractf64x4_pd(a, 0);
286 Packet4d lane0_b = _mm512_extractf64x4_pd(b, 0);
287 res = _mm512_insertf64x4(res, _mm256_and_pd(lane0_a, lane0_b), 0);
288
289 Packet4d lane1_a = _mm512_extractf64x4_pd(a, 1);
290 Packet4d lane1_b = _mm512_extractf64x4_pd(b, 1);
291 res = _mm512_insertf64x4(res, _mm256_and_pd(lane1_a, lane1_b), 1);
292
293 return res;
294#endif
295}
__m512d Packet8d
Definition PacketMath.h:33

◆ pand< Packet8f >()

202{ return _mm256_and_ps(a,b); }

◆ pandnot()

template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pandnot ( const Packet a,
const Packet b 
)
inline
210{ return a & (!b); }

Referenced by pandnot< Packet4f >().

+ Here is the caller graph for this function:

◆ pandnot< Packet16f >()

389 {
390#ifdef EIGEN_VECTORIZE_AVX512DQ
391 return _mm512_andnot_ps(a, b);
392#else
393 Packet16f res = _mm512_undefined_ps();
394 Packet4f lane0_a = _mm512_extractf32x4_ps(a, 0);
395 Packet4f lane0_b = _mm512_extractf32x4_ps(b, 0);
396 res = _mm512_insertf32x4(res, _mm_andnot_ps(lane0_a, lane0_b), 0);
397
398 Packet4f lane1_a = _mm512_extractf32x4_ps(a, 1);
399 Packet4f lane1_b = _mm512_extractf32x4_ps(b, 1);
400 res = _mm512_insertf32x4(res, _mm_andnot_ps(lane1_a, lane1_b), 1);
401
402 Packet4f lane2_a = _mm512_extractf32x4_ps(a, 2);
403 Packet4f lane2_b = _mm512_extractf32x4_ps(b, 2);
404 res = _mm512_insertf32x4(res, _mm_andnot_ps(lane2_a, lane2_b), 2);
405
406 Packet4f lane3_a = _mm512_extractf32x4_ps(a, 3);
407 Packet4f lane3_b = _mm512_extractf32x4_ps(b, 3);
408 res = _mm512_insertf32x4(res, _mm_andnot_ps(lane3_a, lane3_b), 3);
409
410 return res;
411#endif
412}

◆ pandnot< Packet1cd >() [1/2]

311{ return Packet1cd(_mm_andnot_pd(a.v,b.v)); }

◆ pandnot< Packet1cd >() [2/2]

180{ return Packet1cd(vec_and(a.v, vec_nor(b.v,b.v))); }

◆ pandnot< Packet2cd >()

278{ return Packet2cd(_mm256_andnot_pd(a.v,b.v)); }

◆ pandnot< Packet2cf >() [1/4]

125{ return Packet2cf(pandnot<Packet4f>(a.v, b.v)); }

References pandnot< Packet4f >().

+ Here is the call graph for this function:

◆ pandnot< Packet2cf >() [2/4]

117{
118 return Packet2cf(vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v))));
119}

◆ pandnot< Packet2cf >() [3/4]

88{ return Packet2cf(_mm_andnot_ps(a.v,b.v)); }

◆ pandnot< Packet2cf >() [4/4]

181{ return Packet2cf(pandnot<Packet4f>(a.v,b.v)); }

References pandnot< Packet4f >().

+ Here is the call graph for this function:

◆ pandnot< Packet2d >() [1/2]

303{ return _mm_andnot_pd(a,b); }

◆ pandnot< Packet2d >() [2/2]

617{ return vec_and(a, vec_nor(b, b)); }

◆ pandnot< Packet4cf >()

73{ return Packet4cf(_mm256_andnot_ps(a.v,b.v)); }

◆ pandnot< Packet4d >()

212{ return _mm256_andnot_pd(a,b); }

◆ pandnot< Packet4f >() [1/4]

424{ return vec_and(a, vec_nor(b, b)); }

Referenced by pandnot< Packet2cf >().

+ Here is the caller graph for this function:

◆ pandnot< Packet4f >() [2/4]

272{
273 return vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
274}

◆ pandnot< Packet4f >() [3/4]

302{ return _mm_andnot_ps(a,b); }

◆ pandnot< Packet4f >() [4/4]

619{
620 Packet4f res;
621 res.v4f[0] = pandnot(a.v4f[0], b.v4f[0]);
622 res.v4f[1] = pandnot(a.v4f[1], b.v4f[1]);
623 return res;
624}
EIGEN_DEVICE_FUNC Packet pandnot(const Packet &a, const Packet &b)
Definition GenericPacketMath.h:210

References pandnot(), and Eigen::internal::Packet4f::v4f.

+ Here is the call graph for this function:

◆ pandnot< Packet4i >() [1/4]

425{ return vec_and(a, vec_nor(b, b)); }

◆ pandnot< Packet4i >() [2/4]

275{ return vbicq_s32(a,b); }

◆ pandnot< Packet4i >() [3/4]

304{ return _mm_andnot_si128(a,b); }

◆ pandnot< Packet4i >() [4/4]

616{ return pand<Packet4i>(a, vec_nor(b, b)); }

References pand< Packet4i >().

+ Here is the call graph for this function:

◆ pandnot< Packet8d >()

415 {
416#ifdef EIGEN_VECTORIZE_AVX512DQ
417 return _mm512_andnot_pd(a, b);
418#else
419 Packet8d res = _mm512_undefined_pd();
420 Packet4d lane0_a = _mm512_extractf64x4_pd(a, 0);
421 Packet4d lane0_b = _mm512_extractf64x4_pd(b, 0);
422 res = _mm512_insertf64x4(res, _mm256_andnot_pd(lane0_a, lane0_b), 0);
423
424 Packet4d lane1_a = _mm512_extractf64x4_pd(a, 1);
425 Packet4d lane1_b = _mm512_extractf64x4_pd(b, 1);
426 res = _mm512_insertf64x4(res, _mm256_andnot_pd(lane1_a, lane1_b), 1);
427
428 return res;
429#endif
430}

◆ pandnot< Packet8f >()

211{ return _mm256_andnot_ps(a,b); }

◆ parallelize_gemm()

template<bool Condition, typename Functor , typename Index >
void Eigen::internal::parallelize_gemm ( const Functor &  func,
Index  rows,
Index  cols,
Index  depth,
bool  transpose 
)
87{
88 // TODO when EIGEN_USE_BLAS is defined,
89 // we should still enable OMP for other scalar types
90#if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS)
91 // FIXME the transpose variable is only needed to properly split
92 // the matrix product when multithreading is enabled. This is a temporary
93 // fix to support row-major destination matrices. This whole
 93 // parallelizer mechanism has to be redesigned anyway.
96 EIGEN_UNUSED_VARIABLE(transpose);
97 func(0,rows, 0,cols);
98#else
99
100 // Dynamically check whether we should enable or disable OpenMP.
101 // The conditions are:
102 // - the max number of threads we can create is greater than 1
103 // - we are not already in a parallel code
104 // - the sizes are large enough
105
106 // compute the maximal number of threads from the size of the product:
107 // This first heuristic takes into account that the product kernel is fully optimized when working with nr columns at once.
108 Index size = transpose ? rows : cols;
109 Index pb_max_threads = std::max<Index>(1,size / Functor::Traits::nr);
110
111 // compute the maximal number of threads from the total amount of work:
112 double work = static_cast<double>(rows) * static_cast<double>(cols) *
113 static_cast<double>(depth);
114 double kMinTaskSize = 50000; // FIXME improve this heuristic.
115 pb_max_threads = std::max<Index>(1, std::min<Index>(pb_max_threads, work / kMinTaskSize));
116
117 // compute the number of threads we are going to use
118 Index threads = std::min<Index>(nbThreads(), pb_max_threads);
119
 120 // if multi-threading is explicitly disabled, not useful, or if we already are in a parallel session,
121 // then abort multi-threading
122 // FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
123 if((!Condition) || (threads==1) || (omp_get_num_threads()>1))
124 return func(0,rows, 0,cols);
125
127 func.initParallelSession(threads);
128
129 if(transpose)
130 std::swap(rows,cols);
131
133
134 #pragma omp parallel num_threads(threads)
135 {
136 Index i = omp_get_thread_num();
 137 // Note that the actual number of threads might be lower than the number of requested ones.
138 Index actual_threads = omp_get_num_threads();
139
140 Index blockCols = (cols / actual_threads) & ~Index(0x3);
141 Index blockRows = (rows / actual_threads);
142 blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr;
143
144 Index r0 = i*blockRows;
145 Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;
146
147 Index c0 = i*blockCols;
148 Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;
149
150 info[i].lhs_start = r0;
151 info[i].lhs_length = actualBlockRows;
152
153 if(transpose) func(c0, actualBlockCols, 0, rows, info);
154 else func(0, rows, c0, actualBlockCols, info);
155 }
156#endif
157}
void initParallel()
Definition Parallelizer.h:48
int nbThreads()
Definition Parallelizer.h:58
Definition Parallelizer.h:75

References ei_declare_aligned_stack_constructed_variable, EIGEN_UNUSED_VARIABLE, Eigen::initParallel(), and Eigen::nbThreads().

Referenced by Eigen::internal::generic_product_impl< Lhs, Rhs, DenseShape, DenseShape, GemmProduct >::scaleAndAddTo().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ parg()

template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::parg ( const Packet a)
inline
194{ using numext::arg; return arg(a); }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const ArgReturnType arg() const
Definition ArrayCwiseUnaryOps.h:57

References arg().

Referenced by Eigen::internal::scalar_arg_op< Scalar >::packetOp().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ partial_lu_inplace()

template<typename MatrixType , typename TranspositionType >
void Eigen::internal::partial_lu_inplace ( MatrixType &  lu,
TranspositionType &  row_transpositions,
typename TranspositionType::StorageIndex &  nb_transpositions 
)
503{
504 eigen_assert(lu.cols() == row_transpositions.size());
505 eigen_assert((&row_transpositions.coeffRef(1)-&row_transpositions.coeffRef(0)) == 1);
506
508 <typename MatrixType::Scalar, MatrixType::Flags&RowMajorBit?RowMajor:ColMajor, typename TranspositionType::StorageIndex>
509 ::blocked_lu(lu.rows(), lu.cols(), &lu.coeffRef(0,0), lu.outerStride(), &row_transpositions.coeffRef(0), nb_transpositions);
510}
Definition PartialPivLU.h:344

References Eigen::ColMajor, eigen_assert, Eigen::RowMajor, and Eigen::RowMajorBit.

Referenced by Eigen::PartialPivLU< _MatrixType >::compute().

+ Here is the caller graph for this function:

◆ pasin()

template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::pasin ( const Packet a)
378{ using std::asin; return asin(a); }
EIGEN_DEVICE_FUNC const AsinReturnType asin() const
Definition ArrayCwiseUnaryOps.h:276

References asin().

+ Here is the call graph for this function:

◆ patan()

template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::patan ( const Packet a)
386{ using std::atan; return atan(a); }
EIGEN_DEVICE_FUNC const AtanReturnType atan() const
Definition ArrayCwiseUnaryOps.h:248

References atan().

+ Here is the call graph for this function:

◆ pblend() [1/15]

template<>
EIGEN_STRONG_INLINE Packet16f Eigen::internal::pblend ( const Selector< 16 > &  ,
const Packet16f ,
const Packet16f  
)
1300 {
1301 assert(false && "To be implemented");
1302 return Packet16f();
1303}

◆ pblend() [2/15]

template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::pblend ( const Selector< 2 > &  ifPacket,
const Packet2cf thenPacket,
const Packet2cf elsePacket 
)
442 {
443 __m128d result = pblend<Packet2d>(ifPacket, _mm_castps_pd(thenPacket.v), _mm_castps_pd(elsePacket.v));
444 return Packet2cf(_mm_castpd_ps(result));
445}
Packet4f v
Definition Complex.h:34

◆ pblend() [3/15]

template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::pblend ( const Selector< 2 > &  ifPacket,
const Packet2cf thenPacket,
const Packet2cf elsePacket 
)
386 {
387 Packet2cf result;
388 const Selector<4> ifPacket4 = { ifPacket.select[0], ifPacket.select[0], ifPacket.select[1], ifPacket.select[1] };
389 result.v = pblend<Packet4f>(ifPacket4, thenPacket.v, elsePacket.v);
390 return result;
391}
bool select[N]
Definition GenericPacketMath.h:553
Definition GenericPacketMath.h:552

References Eigen::internal::Selector< N >::select, and Eigen::internal::Packet2cf::v.

◆ pblend() [4/15]

template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::pblend ( const Selector< 2 > &  ifPacket,
const Packet2d thenPacket,
const Packet2d elsePacket 
)
822 {
823 const __m128d zero = _mm_setzero_pd();
824 const __m128d select = _mm_set_pd(ifPacket.select[1], ifPacket.select[0]);
825 __m128d false_mask = _mm_cmpeq_pd(select, zero);
826#ifdef EIGEN_VECTORIZE_SSE4_1
827 return _mm_blendv_pd(thenPacket, elsePacket, false_mask);
828#else
829 return _mm_or_pd(_mm_andnot_pd(false_mask, thenPacket), _mm_and_pd(false_mask, elsePacket));
830#endif
831}

References Eigen::internal::Selector< N >::select.

◆ pblend() [5/15]

template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::pblend ( const Selector< 2 > &  ifPacket,
const Packet2d thenPacket,
const Packet2d elsePacket 
)
935 {
936 Packet2ul select = { ifPacket.select[0], ifPacket.select[1] };
937 Packet2ul mask = vec_cmpeq(select, reinterpret_cast<Packet2ul>(p2l_ONE));
938 return vec_sel(elsePacket, thenPacket, mask);
939}
__vector unsigned long long Packet2ul
Definition PacketMath.h:41

References Eigen::internal::Selector< N >::select.

◆ pblend() [6/15]

template<>
EIGEN_STRONG_INLINE Packet4d Eigen::internal::pblend ( const Selector< 4 > &  ifPacket,
const Packet4d thenPacket,
const Packet4d elsePacket 
)
606 {
607 const __m256d zero = _mm256_setzero_pd();
608 const __m256d select = _mm256_set_pd(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
609 __m256d false_mask = _mm256_cmp_pd(select, zero, _CMP_EQ_UQ);
610 return _mm256_blendv_pd(thenPacket, elsePacket, false_mask);
611}

References Eigen::internal::Selector< N >::select.

◆ pblend() [7/15]

template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pblend ( const Selector< 4 > &  ifPacket,
const Packet4f thenPacket,
const Packet4f elsePacket 
)
765 {
766 Packet4ui select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2], ifPacket.select[3] };
767 Packet4ui mask = reinterpret_cast<Packet4ui>(vec_cmpeq(reinterpret_cast<Packet4ui>(select), reinterpret_cast<Packet4ui>(p4i_ONE)));
768 return vec_sel(elsePacket, thenPacket, mask);
769}

References Eigen::internal::Selector< N >::select.

◆ pblend() [8/15]

template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pblend ( const Selector< 4 > &  ifPacket,
const Packet4f thenPacket,
const Packet4f elsePacket 
)
812 {
813 const __m128 zero = _mm_setzero_ps();
814 const __m128 select = _mm_set_ps(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
815 __m128 false_mask = _mm_cmpeq_ps(select, zero);
816#ifdef EIGEN_VECTORIZE_SSE4_1
817 return _mm_blendv_ps(thenPacket, elsePacket, false_mask);
818#else
819 return _mm_or_ps(_mm_andnot_ps(false_mask, thenPacket), _mm_and_ps(false_mask, elsePacket));
820#endif
821}

References Eigen::internal::Selector< N >::select.

◆ pblend() [9/15]

template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pblend ( const Selector< 4 > &  ifPacket,
const Packet4f thenPacket,
const Packet4f elsePacket 
)
924 {
925 Packet2ul select_hi = { ifPacket.select[0], ifPacket.select[1] };
926 Packet2ul select_lo = { ifPacket.select[2], ifPacket.select[3] };
927 Packet2ul mask_hi = vec_cmpeq(select_hi, reinterpret_cast<Packet2ul>(p2l_ONE));
928 Packet2ul mask_lo = vec_cmpeq(select_lo, reinterpret_cast<Packet2ul>(p2l_ONE));
929 Packet4f result;
930 result.v4f[0] = vec_sel(elsePacket.v4f[0], thenPacket.v4f[0], mask_hi);
931 result.v4f[1] = vec_sel(elsePacket.v4f[1], thenPacket.v4f[1], mask_lo);
932 return result;
933}

References Eigen::internal::Selector< N >::select, and Eigen::internal::Packet4f::v4f.

◆ pblend() [10/15]

template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pblend ( const Selector< 4 > &  ifPacket,
const Packet4i thenPacket,
const Packet4i elsePacket 
)
759 {
760 Packet4ui select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2], ifPacket.select[3] };
761 Packet4ui mask = reinterpret_cast<Packet4ui>(vec_cmpeq(reinterpret_cast<Packet4ui>(select), reinterpret_cast<Packet4ui>(p4i_ONE)));
762 return vec_sel(elsePacket, thenPacket, mask);
763}

References Eigen::internal::Selector< N >::select.

Referenced by pinsertfirst(), and pinsertlast().

+ Here is the caller graph for this function:

◆ pblend() [11/15]

template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pblend ( const Selector< 4 > &  ifPacket,
const Packet4i thenPacket,
const Packet4i elsePacket 
)
802 {
803 const __m128i zero = _mm_setzero_si128();
804 const __m128i select = _mm_set_epi32(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
805 __m128i false_mask = _mm_cmpeq_epi32(select, zero);
806#ifdef EIGEN_VECTORIZE_SSE4_1
807 return _mm_blendv_epi8(thenPacket, elsePacket, false_mask);
808#else
809 return _mm_or_si128(_mm_andnot_si128(false_mask, thenPacket), _mm_and_si128(false_mask, elsePacket));
810#endif
811}

References Eigen::internal::Selector< N >::select.

◆ pblend() [12/15]

template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pblend ( const Selector< 4 > &  ifPacket,
const Packet4i thenPacket,
const Packet4i elsePacket 
)
918 {
919 Packet4ui select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2], ifPacket.select[3] };
920 Packet4ui mask = vec_cmpeq(select, reinterpret_cast<Packet4ui>(p4i_ONE));
921 return vec_sel(elsePacket, thenPacket, mask);
922}

References Eigen::internal::Selector< N >::select.

◆ pblend() [13/15]

template<>
EIGEN_STRONG_INLINE Packet8d Eigen::internal::pblend ( const Selector< 8 > &  ,
const Packet8d ,
const Packet8d  
)
1307 {
1308 assert(false && "To be implemented");
1309 return Packet8d();
1310}

◆ pblend() [14/15]

template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::pblend ( const Selector< 8 > &  ifPacket,
const Packet8f thenPacket,
const Packet8f elsePacket 
)
600 {
601 const __m256 zero = _mm256_setzero_ps();
602 const __m256 select = _mm256_set_ps(ifPacket.select[7], ifPacket.select[6], ifPacket.select[5], ifPacket.select[4], ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
603 __m256 false_mask = _mm256_cmp_ps(select, zero, _CMP_EQ_UQ);
604 return _mm256_blendv_ps(thenPacket, elsePacket, false_mask);
605}

References Eigen::internal::Selector< N >::select.

◆ pblend() [15/15]

template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pblend ( const Selector< unpacket_traits< Packet >::size > &  ifPacket,
const Packet thenPacket,
const Packet elsePacket 
)
inline
557 {
558 return ifPacket.select[0] ? thenPacket : elsePacket;
559}

◆ pbroadcast2()

template<typename Packet >
EIGEN_DEVICE_FUNC void Eigen::internal::pbroadcast2 ( const typename unpacket_traits< Packet >::type *  a,
Packet a0,
Packet a1 
)
inline
275{
276 a0 = pload1<Packet>(a+0);
277 a1 = pload1<Packet>(a+1);
278}

◆ pbroadcast4()

template<typename Packet >
EIGEN_DEVICE_FUNC void Eigen::internal::pbroadcast4 ( const typename unpacket_traits< Packet >::type *  a,
Packet a0,
Packet a1,
Packet a2,
Packet a3 
)
inline
258{
259 a0 = pload1<Packet>(a+0);
260 a1 = pload1<Packet>(a+1);
261 a2 = pload1<Packet>(a+2);
262 a3 = pload1<Packet>(a+3);
263}

Referenced by Eigen::internal::gebp_traits< _LhsScalar, _RhsScalar, _ConjLhs, _ConjRhs >::broadcastRhs(), Eigen::internal::gebp_traits< std::complex< RealScalar >, RealScalar, _ConjLhs, false >::broadcastRhs(), and Eigen::internal::gebp_traits< RealScalar, std::complex< RealScalar >, false, _ConjRhs >::broadcastRhs().

+ Here is the caller graph for this function:

◆ pbroadcast4< Packet2d >() [1/2]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pbroadcast4< Packet2d > ( const double *  a,
Packet2d a0,
Packet2d a1,
Packet2d a2,
Packet2d a3 
)
483{
484#ifdef EIGEN_VECTORIZE_SSE3
485 a0 = _mm_loaddup_pd(a+0);
486 a1 = _mm_loaddup_pd(a+1);
487 a2 = _mm_loaddup_pd(a+2);
488 a3 = _mm_loaddup_pd(a+3);
489#else
490 a1 = pload<Packet2d>(a);
491 a0 = vec2d_swizzle1(a1, 0,0);
492 a1 = vec2d_swizzle1(a1, 1,1);
493 a3 = pload<Packet2d>(a+2);
494 a2 = vec2d_swizzle1(a3, 0,0);
495 a3 = vec2d_swizzle1(a3, 1,1);
496#endif
497}
#define vec2d_swizzle1(v, p, q)
Definition PacketMath.h:70

References pload< Packet2d >(), and vec2d_swizzle1.

+ Here is the call graph for this function:

◆ pbroadcast4< Packet2d >() [2/2]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pbroadcast4< Packet2d > ( const double *  a,
Packet2d a0,
Packet2d a1,
Packet2d a2,
Packet2d a3 
)
433{
434 a1 = pload<Packet2d>(a);
435 a0 = vec_splat(a1, 0);
436 a1 = vec_splat(a1, 1);
437 a3 = pload<Packet2d>(a+2);
438 a2 = vec_splat(a3, 0);
439 a3 = vec_splat(a3, 1);
440}

References pload< Packet2d >().

+ Here is the call graph for this function:

◆ pbroadcast4< Packet4f >() [1/3]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pbroadcast4< Packet4f > ( const float *  a,
Packet4f a0,
Packet4f a1,
Packet4f a2,
Packet4f a3 
)
291{
292 a3 = pload<Packet4f>(a);
293 a0 = vec_splat(a3, 0);
294 a1 = vec_splat(a3, 1);
295 a2 = vec_splat(a3, 2);
296 a3 = vec_splat(a3, 3);
297}

References pload< Packet4f >().

+ Here is the call graph for this function:

◆ pbroadcast4< Packet4f >() [2/3]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pbroadcast4< Packet4f > ( const float *  a,
Packet4f a0,
Packet4f a1,
Packet4f a2,
Packet4f a3 
)
473{
474 a3 = pload<Packet4f>(a);
475 a0 = vec4f_swizzle1(a3, 0,0,0,0);
476 a1 = vec4f_swizzle1(a3, 1,1,1,1);
477 a2 = vec4f_swizzle1(a3, 2,2,2,2);
478 a3 = vec4f_swizzle1(a3, 3,3,3,3);
479}
#define vec4f_swizzle1(v, p, q, r, s)
Definition PacketMath.h:64

References pload< Packet4f >(), and vec4f_swizzle1.

+ Here is the call graph for this function:

◆ pbroadcast4< Packet4f >() [3/3]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pbroadcast4< Packet4f > ( const float *  a,
Packet4f a0,
Packet4f a1,
Packet4f a2,
Packet4f a3 
)
422{
423 a3 = pload<Packet4f>(a);
424 a0 = vec_splat_packet4f<0>(a3);
425 a1 = vec_splat_packet4f<1>(a3);
426 a2 = vec_splat_packet4f<2>(a3);
427 a3 = vec_splat_packet4f<3>(a3);
428}

References pload< Packet4f >().

+ Here is the call graph for this function:

◆ pbroadcast4< Packet4i >() [1/2]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pbroadcast4< Packet4i > ( const int *  a,
Packet4i a0,
Packet4i a1,
Packet4i a2,
Packet4i a3 
)
301{
302 a3 = pload<Packet4i>(a);
303 a0 = vec_splat(a3, 0);
304 a1 = vec_splat(a3, 1);
305 a2 = vec_splat(a3, 2);
306 a3 = vec_splat(a3, 3);
307}

References pload< Packet4i >().

+ Here is the call graph for this function:

◆ pbroadcast4< Packet4i >() [2/2]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pbroadcast4< Packet4i > ( const int *  a,
Packet4i a0,
Packet4i a1,
Packet4i a2,
Packet4i a3 
)
411{
412 a3 = pload<Packet4i>(a);
413 a0 = vec_splat(a3, 0);
414 a1 = vec_splat(a3, 1);
415 a2 = vec_splat(a3, 2);
416 a3 = vec_splat(a3, 3);
417}

References pload< Packet4i >().

+ Here is the call graph for this function:

◆ pcast() [1/3]

template<typename SrcPacket , typename TgtPacket >
EIGEN_DEVICE_FUNC TgtPacket Eigen::internal::pcast ( const SrcPacket &  a)
inline
134 {
135 return static_cast<TgtPacket>(a);
136}

◆ pcast() [2/3]

template<typename SrcPacket , typename TgtPacket >
EIGEN_DEVICE_FUNC TgtPacket Eigen::internal::pcast ( const SrcPacket &  a,
const SrcPacket &   
)
inline
139 {
140 return static_cast<TgtPacket>(a);
141}

◆ pcast() [3/3]

template<typename SrcPacket , typename TgtPacket >
EIGEN_DEVICE_FUNC TgtPacket Eigen::internal::pcast ( const SrcPacket &  a,
const SrcPacket &  ,
const SrcPacket &  ,
const SrcPacket &   
)
inline
145 {
146 return static_cast<TgtPacket>(a);
147}

◆ pcast< Packet2d, Packet4f >()

63 {
64 return _mm_shuffle_ps(_mm_cvtpd_ps(a), _mm_cvtpd_ps(b), (1 << 2) | (1 << 6));
65}

◆ pcast< Packet4f, Packet2d >()

67 {
68 // Simply discard the second half of the input
69 return _mm_cvtps_pd(a);
70}

◆ pcast< Packet4f, Packet4i >()

55 {
56 return _mm_cvttps_epi32(a);
57}

◆ pcast< Packet4i, Packet4f >()

59 {
60 return _mm_cvtepi32_ps(a);
61}

◆ pcast< Packet8f, Packet8i >()

39 {
40 return _mm256_cvtps_epi32(a);
41}

◆ pcast< Packet8i, Packet8f >()

43 {
44 return _mm256_cvtepi32_ps(a);
45}

◆ pceil()

template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::pceil ( const Packet a)
436{ using numext::ceil; return ceil(a); }
EIGEN_DEVICE_FUNC const CeilReturnType ceil() const
Definition ArrayCwiseUnaryOps.h:402

References ceil(), and Eigen::numext::ceil().

+ Here is the call graph for this function:

◆ pceil< Packet2d >()

641{ return vec_ceil(a); }

◆ pceil< Packet4d >()

197{ return _mm256_ceil_pd(a); }

◆ pceil< Packet4f >() [1/2]

428{ return vec_ceil(a); }

◆ pceil< Packet4f >() [2/2]

635{
636 Packet4f res;
637 res.v4f[0] = vec_ceil(a.v4f[0]);
638 res.v4f[1] = vec_ceil(a.v4f[1]);
639 return res;
640}

References Eigen::internal::Packet4f::v4f.

◆ pceil< Packet8f >()

196{ return _mm256_ceil_ps(a); }

◆ pconj() [1/25]

template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pconj ( const Packet a)
inline
166{ return numext::conj(a); }

◆ pconj() [2/25]

template<>
EIGEN_STRONG_INLINE Packet16f Eigen::internal::pconj ( const Packet16f a)
183 {
184 return a;
185}

◆ pconj() [3/25]

template<>
EIGEN_STRONG_INLINE Packet16i Eigen::internal::pconj ( const Packet16i a)
191 {
192 return a;
193}

◆ pconj() [4/25]

template<>
EIGEN_STRONG_INLINE Packet1cd Eigen::internal::pconj ( const Packet1cd a)
289{
290 const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0));
291 return Packet1cd(_mm_xor_pd(a.v,mask));
292}

◆ pconj() [5/25]

template<>
EIGEN_STRONG_INLINE Packet1cd Eigen::internal::pconj ( const Packet1cd a)
140{ return Packet1cd((Packet2d)vec_xor((Packet2d)a.v, (Packet2d)p2ul_CONJ_XOR2)); }

References p2ul_CONJ_XOR2.

◆ pconj() [6/25]

template<>
EIGEN_STRONG_INLINE Packet2cd Eigen::internal::pconj ( const Packet2cd a)
260{
261 const __m256d mask = _mm256_castsi256_pd(_mm256_set_epi32(0x80000000,0x0,0x0,0x0,0x80000000,0x0,0x0,0x0));
262 return Packet2cd(_mm256_xor_pd(a.v,mask));
263}

◆ pconj() [7/25]

template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::pconj ( const Packet2cf a)
101{ return Packet2cf(pxor<Packet4f>(a.v, reinterpret_cast<Packet4f>(p4ui_CONJ_XOR))); }

References p4ui_CONJ_XOR, and pxor< Packet4f >().

Referenced by Eigen::internal::gebp_traits< std::complex< RealScalar >, std::complex< RealScalar >, _ConjLhs, _ConjRhs >::acc(), Eigen::internal::scalar_conjugate_op< Scalar >::packetOp(), pconj(), Eigen::internal::conj_if< true >::pconj(), pdiv< Packet2cd >(), pdiv< Packet4cf >(), Eigen::internal::conj_helper< Packet1cd, Packet1cd, false, true >::pmul(), Eigen::internal::conj_helper< Packet1cd, Packet1cd, true, false >::pmul(), Eigen::internal::conj_helper< Packet1cd, Packet1cd, true, true >::pmul(), Eigen::internal::conj_helper< Packet2cd, Packet2cd, false, true >::pmul(), Eigen::internal::conj_helper< Packet2cd, Packet2cd, true, false >::pmul(), Eigen::internal::conj_helper< Packet2cd, Packet2cd, true, true >::pmul(), Eigen::internal::conj_helper< Packet2cf, Packet2cf, false, true >::pmul(), Eigen::internal::conj_helper< Packet2cf, Packet2cf, true, false >::pmul(), Eigen::internal::conj_helper< Packet2cf, Packet2cf, true, true >::pmul(), Eigen::internal::conj_helper< Packet4cf, Packet4cf, false, true >::pmul(), Eigen::internal::conj_helper< Packet4cf, Packet4cf, true, false >::pmul(), and Eigen::internal::conj_helper< Packet4cf, Packet4cf, true, true >::pmul().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ pconj() [8/25]

template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::pconj ( const Packet2cf a)
79{
80 Packet4ui b = vreinterpretq_u32_f32(a.v);
81 return Packet2cf(vreinterpretq_f32_u32(veorq_u32(b, p4ui_CONJ_XOR())));
82}

References p4ui_CONJ_XOR.

◆ pconj() [9/25]

template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::pconj ( const Packet2cf a)
63{
64 const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x00000000,0x80000000,0x00000000,0x80000000));
65 return Packet2cf(_mm_xor_ps(a.v,mask));
66}

◆ pconj() [10/25]

template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::pconj ( const Packet2cf a)
142{
143 Packet2cf res;
144 res.v.v4f[0] = pconj(Packet1cd(reinterpret_cast<Packet2d>(a.v.v4f[0]))).v;
145 res.v.v4f[1] = pconj(Packet1cd(reinterpret_cast<Packet2d>(a.v.v4f[1]))).v;
146 return res;
147}
EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf &a)
Definition Complex.h:101

References pconj(), Eigen::internal::Packet2cf::v, and Eigen::internal::Packet4f::v4f.

+ Here is the call graph for this function:

◆ pconj() [11/25]

template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::pconj ( const Packet2d a)
222{ return a; }

◆ pconj() [12/25]

template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::pconj ( const Packet2d a)
550{ return a; }

◆ pconj() [13/25]

template<>
EIGEN_STRONG_INLINE Packet4cf Eigen::internal::pconj ( const Packet4cf a)
57{
58 const __m256 mask = _mm256_castsi256_ps(_mm256_setr_epi32(0x00000000,0x80000000,0x00000000,0x80000000,0x00000000,0x80000000,0x00000000,0x80000000));
59 return Packet4cf(_mm256_xor_ps(a.v,mask));
60}

◆ pconj() [14/25]

template<>
EIGEN_STRONG_INLINE Packet4d Eigen::internal::pconj ( const Packet4d a)
146{ return a; }

◆ pconj() [15/25]

template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pconj ( const Packet4f a)
358{ return a; }

◆ pconj() [16/25]

template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pconj ( const Packet4f a)
170{ return a; }

◆ pconj() [17/25]

template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pconj ( const Packet4f a)
221{ return a; }

◆ pconj() [18/25]

template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pconj ( const Packet4f a)
549{ return a; }

◆ pconj() [19/25]

template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pconj ( const Packet4i a)
359{ return a; }

◆ pconj() [20/25]

template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pconj ( const Packet4i a)
171{ return a; }

◆ pconj() [21/25]

template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pconj ( const Packet4i a)
223{ return a; }

◆ pconj() [22/25]

template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pconj ( const Packet4i a)
548{ return a; }

◆ pconj() [23/25]

template<>
EIGEN_STRONG_INLINE Packet8d Eigen::internal::pconj ( const Packet8d a)
187 {
188 return a;
189}

◆ pconj() [24/25]

template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::pconj ( const Packet8f a)
145{ return a; }

◆ pconj() [25/25]

template<>
EIGEN_STRONG_INLINE Packet8i Eigen::internal::pconj ( const Packet8i a)
147{ return a; }

◆ pcos()

template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::pcos ( const Packet a)
370{ using std::cos; return cos(a); }
EIGEN_DEVICE_FUNC const CosReturnType cos() const
Definition ArrayCwiseUnaryOps.h:202

References cos().

+ Here is the call graph for this function:

◆ pcos< Packet4f >()

360{
361 Packet4f x = _x;
364
369
370 _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP1,-0.78515625f);
371 _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP2, -2.4187564849853515625e-4f);
372 _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP3, -3.77489497744594108e-8f);
373 _EIGEN_DECLARE_CONST_Packet4f(sincof_p0, -1.9515295891E-4f);
374 _EIGEN_DECLARE_CONST_Packet4f(sincof_p1, 8.3321608736E-3f);
375 _EIGEN_DECLARE_CONST_Packet4f(sincof_p2, -1.6666654611E-1f);
376 _EIGEN_DECLARE_CONST_Packet4f(coscof_p0, 2.443315711809948E-005f);
377 _EIGEN_DECLARE_CONST_Packet4f(coscof_p1, -1.388731625493765E-003f);
378 _EIGEN_DECLARE_CONST_Packet4f(coscof_p2, 4.166664568298827E-002f);
379 _EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4 / M_PI
380
381 Packet4f xmm1, xmm2, xmm3, y;
382 Packet4i emm0, emm2;
383
384 x = pabs(x);
385
386 /* scale by 4/Pi */
387 y = pmul(x, p4f_cephes_FOPI);
388
389 /* get the integer part of y */
390 emm2 = _mm_cvttps_epi32(y);
391 /* j=(j+1) & (~1) (see the cephes sources) */
392 emm2 = _mm_add_epi32(emm2, p4i_1);
393 emm2 = _mm_and_si128(emm2, p4i_not1);
394 y = _mm_cvtepi32_ps(emm2);
395
396 emm2 = _mm_sub_epi32(emm2, p4i_2);
397
398 /* get the swap sign flag */
399 emm0 = _mm_andnot_si128(emm2, p4i_4);
400 emm0 = _mm_slli_epi32(emm0, 29);
401 /* get the polynom selection mask */
402 emm2 = _mm_and_si128(emm2, p4i_2);
403 emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
404
405 Packet4f sign_bit = _mm_castsi128_ps(emm0);
406 Packet4f poly_mask = _mm_castsi128_ps(emm2);
407
408 /* The magic pass: "Extended precision modular arithmetic"
409 x = ((x - y * DP1) - y * DP2) - y * DP3; */
410 xmm1 = pmul(y, p4f_minus_cephes_DP1);
411 xmm2 = pmul(y, p4f_minus_cephes_DP2);
412 xmm3 = pmul(y, p4f_minus_cephes_DP3);
413 x = padd(x, xmm1);
414 x = padd(x, xmm2);
415 x = padd(x, xmm3);
416
 417 /* Evaluate the first polynomial (0 <= x <= Pi/4) */
418 y = p4f_coscof_p0;
419 Packet4f z = pmul(x,x);
420
421 y = pmadd(y,z,p4f_coscof_p1);
422 y = pmadd(y,z,p4f_coscof_p2);
423 y = pmul(y, z);
424 y = pmul(y, z);
425 Packet4f tmp = _mm_mul_ps(z, p4f_half);
426 y = psub(y, tmp);
427 y = padd(y, p4f_1);
428
 429 /* Evaluate the second polynomial (Pi/4 <= x <= 0) */
430 Packet4f y2 = p4f_sincof_p0;
431 y2 = pmadd(y2, z, p4f_sincof_p1);
432 y2 = pmadd(y2, z, p4f_sincof_p2);
433 y2 = pmul(y2, z);
434 y2 = pmadd(y2, x, x);
435
 436 /* select the correct result from the two polynomials */
437 y2 = _mm_and_ps(poly_mask, y2);
438 y = _mm_andnot_ps(poly_mask, y);
439 y = _mm_or_ps(y,y2);
440
441 /* update the sign */
442 return _mm_xor_ps(y, sign_bit);
443}
#define _EIGEN_DECLARE_CONST_Packet4i(NAME, X)
Definition PacketMath.h:53
#define _EIGEN_DECLARE_CONST_Packet4f(NAME, X)
Definition PacketMath.h:50
EIGEN_DEVICE_FUNC Packet psub(const Packet &a, const Packet &b)
Definition GenericPacketMath.h:156
Definition Half.h:80

References _EIGEN_DECLARE_CONST_Packet4f, _EIGEN_DECLARE_CONST_Packet4i, pabs(), padd(), pmadd(), pmul(), psub(), and y.

+ Here is the call graph for this function:

◆ pcosh()

template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::pcosh ( const Packet a)
394{ using std::cosh; return cosh(a); }
EIGEN_DEVICE_FUNC const CoshReturnType cosh() const
Definition ArrayCwiseUnaryOps.h:318

References cosh().

+ Here is the call graph for this function:

◆ pcplxflip() [1/3]

template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pcplxflip ( const Packet a)
inline
353{
354 // FIXME: uncomment the following in case we drop the internal imag and real functions.
355// using std::imag;
356// using std::real;
357 return Packet(imag(a),real(a));
358}
EIGEN_DEVICE_FUNC RealReturnType real() const
Definition CommonCwiseUnaryOps.h:86
EIGEN_DEVICE_FUNC const ImagReturnType imag() const
Definition CommonCwiseUnaryOps.h:95

References imag(), and real().

+ Here is the call graph for this function:

◆ pcplxflip() [2/3]

EIGEN_STRONG_INLINE Packet1cd Eigen::internal::pcplxflip ( const Packet1cd x)
428{
429 return Packet1cd(preverse(Packet2d(x.v)));
430}
EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf &a)
Definition Complex.h:137

References preverse().

+ Here is the call graph for this function:

◆ pcplxflip() [3/3]

EIGEN_STRONG_INLINE Packet2cf Eigen::internal::pcplxflip ( const Packet2cf x)
243{
244 return Packet2cf(vec4f_swizzle1(x.v, 1, 0, 3, 2));
245}

References vec4f_swizzle1.

Referenced by Eigen::internal::gebp_traits< std::complex< RealScalar >, std::complex< RealScalar >, _ConjLhs, _ConjRhs >::acc().

+ Here is the caller graph for this function:

◆ pcplxflip< Packet2cd >()

398{
399 return Packet2cd(_mm256_shuffle_pd(x.v, x.v, 0x5));
400}

◆ pcplxflip< Packet2cf >() [1/2]

167{
168 return Packet2cf(vrev64q_f32(a.v));
169}

◆ pcplxflip< Packet2cf >() [2/2]

238{
239 return Packet2cf(vec_perm(x.v, x.v, p16uc_COMPLEX32_REV));
240}

References p16uc_COMPLEX32_REV.

◆ pcplxflip< Packet4cf >()

219{
220 return Packet4cf(_mm256_shuffle_ps(x.v, x.v, _MM_SHUFFLE(2, 3, 0 ,1)));
221}

◆ pdiv()

template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pdiv ( const Packet a,
const Packet b 
)
inline
176 { return a/b; }

Referenced by Eigen::internal::div_assign_op< DstScalar, SrcScalar >::assignPacket(), generic_fast_tanh_float(), Eigen::internal::scalar_quotient_op< LhsScalar, RhsScalar >::packetOp(), pdiv< Packet1cd >(), pdiv< Packet2cf >(), pexp< Packet2d >(), and prsqrt().

+ Here is the caller graph for this function:

◆ pdiv< Packet16f >()

208 {
209 return _mm512_div_ps(a, b);
210}

◆ pdiv< Packet1cd >() [1/2]

420{
421 // TODO optimize it for SSE3 and 4
423 __m128d s = _mm_mul_pd(b.v,b.v);
424 return Packet1cd(_mm_div_pd(res.v, _mm_add_pd(s,_mm_shuffle_pd(s, s, 0x1))));
425}
__m128d v
Definition Complex.h:253
Definition BlasUtil.h:62
EIGEN_STRONG_INLINE Scalar pmul(const LhsScalar &x, const RhsScalar &y) const
Definition BlasUtil.h:68

References Eigen::internal::conj_helper< LhsScalar, RhsScalar, ConjLhs, ConjRhs >::pmul(), and Eigen::internal::Packet1cd::v.

Referenced by pdiv< Packet2cf >().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ pdiv< Packet1cd >() [2/2]

343{
344 // TODO optimize it for AltiVec
346 Packet2d s = vec_madd(b.v, b.v, p2d_ZERO_);
347 return Packet1cd(pdiv(res.v, s + vec_perm(s, s, p16uc_REVERSE64)));
348}

References p16uc_REVERSE64, p2d_ZERO_, pdiv(), Eigen::internal::conj_helper< LhsScalar, RhsScalar, ConjLhs, ConjRhs >::pmul(), and Eigen::internal::Packet1cd::v.

+ Here is the call graph for this function:

◆ pdiv< Packet2cd >()

390{
391 Packet2cd num = pmul(a, pconj(b));
392 __m256d tmp = _mm256_mul_pd(b.v, b.v);
393 __m256d denom = _mm256_hadd_pd(tmp, tmp);
394 return Packet2cd(_mm256_div_pd(num.v, denom));
395}
__m256d v
Definition Complex.h:228

References pconj(), pmul(), and Eigen::internal::Packet2cd::v.

+ Here is the call graph for this function:

◆ pdiv< Packet2cf >() [1/4]

230{
231 // TODO optimize it for AltiVec
233 Packet4f s = pmul<Packet4f>(b.v, b.v);
234 return Packet2cf(pdiv(res.v, padd<Packet4f>(s, vec_perm(s, s, p16uc_COMPLEX32_REV))));
235}

References p16uc_COMPLEX32_REV, padd< Packet4f >(), pdiv(), Eigen::internal::conj_helper< LhsScalar, RhsScalar, ConjLhs, ConjRhs >::pmul(), pmul< Packet4f >(), and Eigen::internal::Packet2cf::v.

+ Here is the call graph for this function:

◆ pdiv< Packet2cf >() [2/4]

271{
272 // TODO optimize it for NEON
274 Packet4f s, rev_s;
275
276 // this computes the norm
277 s = vmulq_f32(b.v, b.v);
278 rev_s = vrev64q_f32(s);
279
280 return Packet2cf(pdiv<Packet4f>(res.v, vaddq_f32(s,rev_s)));
281}

References pdiv< Packet4f >(), and Eigen::internal::conj_helper< LhsScalar, RhsScalar, ConjLhs, ConjRhs >::pmul().

+ Here is the call graph for this function:

◆ pdiv< Packet2cf >() [3/4]

235{
236 // TODO optimize it for SSE3 and 4
238 __m128 s = _mm_mul_ps(b.v,b.v);
239 return Packet2cf(_mm_div_ps(res.v,_mm_add_ps(s,_mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(s), 0xb1)))));
240}

References Eigen::internal::conj_helper< LhsScalar, RhsScalar, ConjLhs, ConjRhs >::pmul().

+ Here is the call graph for this function:

◆ pdiv< Packet2cf >() [4/4]

351{
352 // TODO optimize it for AltiVec
353 Packet2cf res;
354 res.cd[0] = pdiv<Packet1cd>(a.cd[0], b.cd[0]);
355 res.cd[1] = pdiv<Packet1cd>(a.cd[1], b.cd[1]);
356 return res;
357}

References pdiv< Packet1cd >().

+ Here is the call graph for this function:

◆ pdiv< Packet2d >() [1/2]

244{ return _mm_div_pd(a,b); }

◆ pdiv< Packet2d >() [2/2]

536{ return (a / b); }

◆ pdiv< Packet4cf >()

210{
211 Packet4cf num = pmul(a, pconj(b));
212 __m256 tmp = _mm256_mul_ps(b.v, b.v);
213 __m256 tmp2 = _mm256_shuffle_ps(tmp,tmp,0xB1);
214 __m256 denom = _mm256_add_ps(tmp, tmp2);
215 return Packet4cf(_mm256_div_ps(num.v, denom));
216}
__m256 v
Definition Complex.h:22

References pconj(), pmul(), and Eigen::internal::Packet4cf::v.

+ Here is the call graph for this function:

◆ pdiv< Packet4d >()

154{ return _mm256_div_pd(a,b); }

◆ pdiv< Packet4f >() [1/4]

365{
366#ifndef __VSX__ // VSX actually provides a div instruction
367 Packet4f t, y_0, y_1;
368
369 // Altivec does not offer a divide instruction, we have to do a reciprocal approximation
370 y_0 = vec_re(b);
371
372 // Do one Newton-Raphson iteration to get the needed accuracy
373 t = vec_nmsub(y_0, b, p4f_ONE);
374 y_1 = vec_madd(y_0, t, y_0);
375
376 return vec_madd(a, y_1, p4f_MZERO);
377#else
378 return vec_div(a, b);
379#endif
380}

References p4f_MZERO, and p4f_ONE.

Referenced by pdiv< Packet2cf >().

+ Here is the caller graph for this function:

◆ pdiv< Packet4f >() [2/4]

177{
178#if EIGEN_ARCH_ARM64
179 return vdivq_f32(a,b);
180#else
181 Packet4f inv, restep, div;
182
183 // NEON does not offer a divide instruction, we have to do a reciprocal approximation
184 // However NEON in contrast to other SIMD engines (AltiVec/SSE), offers
185 // a reciprocal estimate AND a reciprocal step -which saves a few instructions
 186 // vrecpeq_f32() returns an estimate to 1/b, which we will fine-tune with
187 // Newton-Raphson and vrecpsq_f32()
188 inv = vrecpeq_f32(b);
189
190 // This returns a differential, by which we will have to multiply inv to get a better
191 // approximation of 1/b.
192 restep = vrecpsq_f32(b, inv);
193 inv = vmulq_f32(restep, inv);
194
195 // Finally, multiply a by 1/b and get the wanted result of the division.
196 div = vmulq_f32(a, inv);
197
198 return div;
199#endif
200}

◆ pdiv< Packet4f >() [3/4]

243{ return _mm_div_ps(a,b); }

◆ pdiv< Packet4f >() [4/4]

530{
531 Packet4f c;
532 c.v4f[0] = a.v4f[0] / b.v4f[0];
533 c.v4f[1] = a.v4f[1] / b.v4f[1];
534 return c;
535}

◆ pdiv< Packet4i >() [1/3]

383{ eigen_assert(false && "packet integer division are not supported by AltiVec");
384 return pset1<Packet4i>(0);
385}

References eigen_assert, and pset1< Packet4i >().

+ Here is the call graph for this function:

◆ pdiv< Packet4i >() [2/3]

203{ eigen_assert(false && "packet integer division are not supported by NEON");
204 return pset1<Packet4i>(0);
205}

References eigen_assert, and pset1< Packet4i >().

+ Here is the call graph for this function:

◆ pdiv< Packet4i >() [3/3]

528{ return (a / b); }

◆ pdiv< Packet8d >()

213 {
214 return _mm512_div_pd(a, b);
215}

◆ pdiv< Packet8f >()

153{ return _mm256_div_ps(a,b); }

◆ pdiv< Packet8i >()

156{ eigen_assert(false && "packet integer division are not supported by AVX");
157 return pset1<Packet8i>(0);
158}

References eigen_assert, and pset1< Packet8i >().

+ Here is the call graph for this function:

◆ permute_symm_to_fullsymm()

template<int Mode, typename MatrixType , int DestOrder>
void Eigen::internal::permute_symm_to_fullsymm ( const MatrixType &  mat,
SparseMatrix< typename MatrixType::Scalar, DestOrder, typename MatrixType::StorageIndex > &  _dest,
const typename MatrixType::StorageIndex *  perm = 0 
)
428{
429 typedef typename MatrixType::StorageIndex StorageIndex;
430 typedef typename MatrixType::Scalar Scalar;
432 typedef Matrix<StorageIndex,Dynamic,1> VectorI;
433 typedef evaluator<MatrixType> MatEval;
434 typedef typename evaluator<MatrixType>::InnerIterator MatIterator;
435
436 MatEval matEval(mat);
437 Dest& dest(_dest.derived());
438 enum {
439 StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor)
440 };
441
442 Index size = mat.rows();
443 VectorI count;
444 count.resize(size);
445 count.setZero();
446 dest.resize(size,size);
447 for(Index j = 0; j<size; ++j)
448 {
449 Index jp = perm ? perm[j] : j;
450 for(MatIterator it(matEval,j); it; ++it)
451 {
452 Index i = it.index();
453 Index r = it.row();
454 Index c = it.col();
455 Index ip = perm ? perm[i] : i;
456 if(Mode==(Upper|Lower))
457 count[StorageOrderMatch ? jp : ip]++;
458 else if(r==c)
459 count[ip]++;
460 else if(( Mode==Lower && r>c) || ( Mode==Upper && r<c))
461 {
462 count[ip]++;
463 count[jp]++;
464 }
465 }
466 }
467 Index nnz = count.sum();
468
469 // reserve space
470 dest.resizeNonZeros(nnz);
471 dest.outerIndexPtr()[0] = 0;
472 for(Index j=0; j<size; ++j)
473 dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
474 for(Index j=0; j<size; ++j)
475 count[j] = dest.outerIndexPtr()[j];
476
477 // copy data
478 for(StorageIndex j = 0; j<size; ++j)
479 {
480 for(MatIterator it(matEval,j); it; ++it)
481 {
482 StorageIndex i = internal::convert_index<StorageIndex>(it.index());
483 Index r = it.row();
484 Index c = it.col();
485
486 StorageIndex jp = perm ? perm[j] : j;
487 StorageIndex ip = perm ? perm[i] : i;
488
489 if(Mode==(Upper|Lower))
490 {
491 Index k = count[StorageOrderMatch ? jp : ip]++;
492 dest.innerIndexPtr()[k] = StorageOrderMatch ? ip : jp;
493 dest.valuePtr()[k] = it.value();
494 }
495 else if(r==c)
496 {
497 Index k = count[ip]++;
498 dest.innerIndexPtr()[k] = ip;
499 dest.valuePtr()[k] = it.value();
500 }
501 else if(( (Mode&Lower)==Lower && r>c) || ( (Mode&Upper)==Upper && r<c))
502 {
503 if(!StorageOrderMatch)
504 std::swap(ip,jp);
505 Index k = count[jp]++;
506 dest.innerIndexPtr()[k] = ip;
507 dest.valuePtr()[k] = it.value();
508 k = count[ip]++;
509 dest.innerIndexPtr()[k] = jp;
510 dest.valuePtr()[k] = numext::conj(it.value());
511 }
512 }
513 }
514}
const Derived & derived() const
Definition SparseMatrixBase.h:138
A versatile sparse matrix representation.
Definition SparseMatrix.h:98
constexpr auto size(const C &c) -> decltype(c.size())
Definition span.hpp:183
IGL_INLINE void count(const Eigen::SparseMatrix< XType > &X, const int dim, Eigen::SparseVector< SType > &S)
Definition count.cpp:12

References Eigen::SparseMatrixBase< Derived >::derived(), Eigen::Lower, permute_symm_to_fullsymm(), and Eigen::Upper.

Referenced by permute_symm_to_fullsymm().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ permute_symm_to_symm() [1/2]

template<int SrcMode, int DstMode, typename MatrixType , int DestOrder>
void Eigen::internal::permute_symm_to_symm ( const MatrixType &  mat,
SparseMatrix< typename MatrixType::Scalar, DestOrder, typename MatrixType::StorageIndex > &  _dest,
const typename MatrixType::StorageIndex *  perm = 0 
)

Referenced by permute_symm_to_symm().

+ Here is the caller graph for this function:

◆ permute_symm_to_symm() [2/2]

template<int _SrcMode, int _DstMode, typename MatrixType , int DstOrder>
void Eigen::internal::permute_symm_to_symm ( const MatrixType &  mat,
SparseMatrix< typename MatrixType::Scalar, DstOrder, typename MatrixType::StorageIndex > &  _dest,
const typename MatrixType::StorageIndex *  perm 
)
518{
519 typedef typename MatrixType::StorageIndex StorageIndex;
520 typedef typename MatrixType::Scalar Scalar;
522 typedef Matrix<StorageIndex,Dynamic,1> VectorI;
523 typedef evaluator<MatrixType> MatEval;
524 typedef typename evaluator<MatrixType>::InnerIterator MatIterator;
525
526 enum {
527 SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor,
528 StorageOrderMatch = int(SrcOrder) == int(DstOrder),
529 DstMode = DstOrder==RowMajor ? (_DstMode==Upper ? Lower : Upper) : _DstMode,
530 SrcMode = SrcOrder==RowMajor ? (_SrcMode==Upper ? Lower : Upper) : _SrcMode
531 };
532
533 MatEval matEval(mat);
534
535 Index size = mat.rows();
536 VectorI count(size);
537 count.setZero();
538 dest.resize(size,size);
539 for(StorageIndex j = 0; j<size; ++j)
540 {
541 StorageIndex jp = perm ? perm[j] : j;
542 for(MatIterator it(matEval,j); it; ++it)
543 {
544 StorageIndex i = it.index();
545 if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
546 continue;
547
548 StorageIndex ip = perm ? perm[i] : i;
549 count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
550 }
551 }
552 dest.outerIndexPtr()[0] = 0;
553 for(Index j=0; j<size; ++j)
554 dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
555 dest.resizeNonZeros(dest.outerIndexPtr()[size]);
556 for(Index j=0; j<size; ++j)
557 count[j] = dest.outerIndexPtr()[j];
558
559 for(StorageIndex j = 0; j<size; ++j)
560 {
561
562 for(MatIterator it(matEval,j); it; ++it)
563 {
564 StorageIndex i = it.index();
565 if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
566 continue;
567
568 StorageIndex jp = perm ? perm[j] : j;
569 StorageIndex ip = perm? perm[i] : i;
570
571 Index k = count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
572 dest.innerIndexPtr()[k] = int(DstMode)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);
573
574 if(!StorageOrderMatch) std::swap(ip,jp);
575 if( ((int(DstMode)==int(Lower) && ip<jp) || (int(DstMode)==int(Upper) && ip>jp)))
576 dest.valuePtr()[k] = numext::conj(it.value());
577 else
578 dest.valuePtr()[k] = it.value();
579 }
580 }
581}

References Eigen::ColMajor, Eigen::SparseMatrixBase< Derived >::derived(), Eigen::SparseMatrix< _Scalar, _Options, _StorageIndex >::innerIndexPtr(), Eigen::Lower, Eigen::SparseMatrix< _Scalar, _Options, _StorageIndex >::outerIndexPtr(), permute_symm_to_symm(), Eigen::SparseMatrix< _Scalar, _Options, _StorageIndex >::resize(), Eigen::SparseMatrix< _Scalar, _Options, _StorageIndex >::resizeNonZeros(), Eigen::RowMajor, Eigen::Upper, and Eigen::SparseMatrix< _Scalar, _Options, _StorageIndex >::valuePtr().

+ Here is the call graph for this function:

◆ pexp()

template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::pexp ( const Packet a)
402{ using std::exp; return exp(a); }
EIGEN_DEVICE_FUNC const ExpReturnType exp() const
Definition ArrayCwiseUnaryOps.h:88

References exp().

Referenced by Eigen::internal::scalar_exp_op< Scalar >::packetOp().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ pexp< Packet2d >() [1/2]

173{
174 Packet2d x = _x;
175
179
180 _EIGEN_DECLARE_CONST_Packet2d(exp_hi, 709.437);
181 _EIGEN_DECLARE_CONST_Packet2d(exp_lo, -709.436139303);
182
183 _EIGEN_DECLARE_CONST_Packet2d(cephes_LOG2EF, 1.4426950408889634073599);
184
185 _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p0, 1.26177193074810590878e-4);
186 _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p1, 3.02994407707441961300e-2);
187 _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p2, 9.99999999999999999910e-1);
188
189 _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q0, 3.00198505138664455042e-6);
190 _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q1, 2.52448340349684104192e-3);
191 _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q2, 2.27265548208155028766e-1);
192 _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q3, 2.00000000000000000009e0);
193
194 _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C1, 0.693145751953125);
195 _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C2, 1.42860682030941723212e-6);
196 static const __m128i p4i_1023_0 = _mm_setr_epi32(1023, 1023, 0, 0);
197
198 Packet2d tmp, fx;
199 Packet4i emm0;
200
201 // clamp x
202 x = pmax(pmin(x, p2d_exp_hi), p2d_exp_lo);
203 /* express exp(x) as exp(g + n*log(2)) */
204 fx = pmadd(p2d_cephes_LOG2EF, x, p2d_half);
205
206#ifdef EIGEN_VECTORIZE_SSE4_1
207 fx = _mm_floor_pd(fx);
208#else
209 emm0 = _mm_cvttpd_epi32(fx);
210 tmp = _mm_cvtepi32_pd(emm0);
 211 /* if greater, subtract 1 */
212 Packet2d mask = _mm_cmpgt_pd(tmp, fx);
213 mask = _mm_and_pd(mask, p2d_1);
214 fx = psub(tmp, mask);
215#endif
216
217 tmp = pmul(fx, p2d_cephes_exp_C1);
218 Packet2d z = pmul(fx, p2d_cephes_exp_C2);
219 x = psub(x, tmp);
220 x = psub(x, z);
221
222 Packet2d x2 = pmul(x,x);
223
224 Packet2d px = p2d_cephes_exp_p0;
225 px = pmadd(px, x2, p2d_cephes_exp_p1);
226 px = pmadd(px, x2, p2d_cephes_exp_p2);
227 px = pmul (px, x);
228
229 Packet2d qx = p2d_cephes_exp_q0;
230 qx = pmadd(qx, x2, p2d_cephes_exp_q1);
231 qx = pmadd(qx, x2, p2d_cephes_exp_q2);
232 qx = pmadd(qx, x2, p2d_cephes_exp_q3);
233
234 x = pdiv(px,psub(qx,px));
235 x = pmadd(p2d_2,x,p2d_1);
236
237 // build 2^n
238 emm0 = _mm_cvttpd_epi32(fx);
239 emm0 = _mm_add_epi32(emm0, p4i_1023_0);
240 emm0 = _mm_slli_epi32(emm0, 20);
241 emm0 = _mm_shuffle_epi32(emm0, _MM_SHUFFLE(1,2,0,3));
242 return pmax(pmul(x, Packet2d(_mm_castsi128_pd(emm0))), _x);
243}
#define _EIGEN_DECLARE_CONST_Packet2d(NAME, X)
Definition PacketMath.h:56

References _EIGEN_DECLARE_CONST_Packet2d, pdiv(), pmadd(), pmax(), pmin(), pmul(), and psub().

Referenced by pexp< Packet4f >().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ pexp< Packet2d >() [2/2]

46{
47 Packet2d x = _x;
48
49 Packet2d tmp, fx;
50 Packet2l emm0;
51
52 // clamp x
53 x = pmax(pmin(x, p2d_exp_hi), p2d_exp_lo);
54 /* express exp(x) as exp(g + n*log(2)) */
55 fx = pmadd(p2d_cephes_LOG2EF, x, p2d_half);
56
57 fx = vec_floor(fx);
58
59 tmp = pmul(fx, p2d_cephes_exp_C1);
60 Packet2d z = pmul(fx, p2d_cephes_exp_C2);
61 x = psub(x, tmp);
62 x = psub(x, z);
63
64 Packet2d x2 = pmul(x,x);
65
66 Packet2d px = p2d_cephes_exp_p0;
67 px = pmadd(px, x2, p2d_cephes_exp_p1);
68 px = pmadd(px, x2, p2d_cephes_exp_p2);
69 px = pmul (px, x);
70
71 Packet2d qx = p2d_cephes_exp_q0;
72 qx = pmadd(qx, x2, p2d_cephes_exp_q1);
73 qx = pmadd(qx, x2, p2d_cephes_exp_q2);
74 qx = pmadd(qx, x2, p2d_cephes_exp_q3);
75
76 x = pdiv(px,psub(qx,px));
77 x = pmadd(p2d_2,x,p2d_1);
78
79 // build 2^n
80 emm0 = vec_ctsl(fx, 0);
81
82 static const Packet2l p2l_1023 = { 1023, 1023 };
83 static const Packet2ul p2ul_52 = { 52, 52 };
84
85 emm0 = emm0 + p2l_1023;
86 emm0 = emm0 << reinterpret_cast<Packet2l>(p2ul_52);
87
88 // Altivec's max & min operators just drop silent NaNs. Check NaNs in
89 // inputs and return them unmodified.
90 Packet2ul isnumber_mask = reinterpret_cast<Packet2ul>(vec_cmpeq(_x, _x));
91 return vec_sel(_x, pmax(pmul(x, reinterpret_cast<Packet2d>(emm0)), _x),
92 isnumber_mask);
93}
__vector long long Packet2l
Definition PacketMath.h:42

References pdiv(), pmadd(), pmax(), pmin(), pmul(), and psub().

+ Here is the call graph for this function:

◆ pexp< Packet4d >()

277 {
278 Packet4d x = _x;
279
283
284 _EIGEN_DECLARE_CONST_Packet4d(exp_hi, 709.437);
285 _EIGEN_DECLARE_CONST_Packet4d(exp_lo, -709.436139303);
286
287 _EIGEN_DECLARE_CONST_Packet4d(cephes_LOG2EF, 1.4426950408889634073599);
288
289 _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_p0, 1.26177193074810590878e-4);
290 _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_p1, 3.02994407707441961300e-2);
291 _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_p2, 9.99999999999999999910e-1);
292
293 _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_q0, 3.00198505138664455042e-6);
294 _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_q1, 2.52448340349684104192e-3);
295 _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_q2, 2.27265548208155028766e-1);
296 _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_q3, 2.00000000000000000009e0);
297
298 _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_C1, 0.693145751953125);
299 _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_C2, 1.42860682030941723212e-6);
301
302 Packet4d tmp, fx;
303
304 // clamp x
305 x = pmax(pmin(x, p4d_exp_hi), p4d_exp_lo);
306 // Express exp(x) as exp(g + n*log(2)).
307 fx = pmadd(p4d_cephes_LOG2EF, x, p4d_half);
308
309 // Get the integer modulus of log(2), i.e. the "n" described above.
310 fx = _mm256_floor_pd(fx);
311
312 // Get the remainder modulo log(2), i.e. the "g" described above. Subtract
313 // n*log(2) out in two steps, i.e. n*C1 + n*C2, C1+C2=log2 to get the last
314 // digits right.
315 tmp = pmul(fx, p4d_cephes_exp_C1);
316 Packet4d z = pmul(fx, p4d_cephes_exp_C2);
317 x = psub(x, tmp);
318 x = psub(x, z);
319
320 Packet4d x2 = pmul(x, x);
321
322 // Evaluate the numerator polynomial of the rational interpolant.
323 Packet4d px = p4d_cephes_exp_p0;
324 px = pmadd(px, x2, p4d_cephes_exp_p1);
325 px = pmadd(px, x2, p4d_cephes_exp_p2);
326 px = pmul(px, x);
327
328 // Evaluate the denominator polynomial of the rational interpolant.
329 Packet4d qx = p4d_cephes_exp_q0;
330 qx = pmadd(qx, x2, p4d_cephes_exp_q1);
331 qx = pmadd(qx, x2, p4d_cephes_exp_q2);
332 qx = pmadd(qx, x2, p4d_cephes_exp_q3);
333
334 // I don't really get this bit, copied from the SSE2 routines, so...
335 // TODO(gonnet): Figure out what is going on here, perhaps find a better
336 // rational interpolant?
337 x = _mm256_div_pd(px, psub(qx, px));
338 x = pmadd(p4d_2, x, p4d_1);
339
340 // Build e=2^n by constructing the exponents in a 128-bit vector and
341 // shifting them to where they belong in double-precision values.
342 __m128i emm0 = _mm256_cvtpd_epi32(fx);
343 emm0 = _mm_add_epi32(emm0, p4i_1023);
344 emm0 = _mm_shuffle_epi32(emm0, _MM_SHUFFLE(3, 1, 2, 0));
345 __m128i lo = _mm_slli_epi64(emm0, 52);
346 __m128i hi = _mm_slli_epi64(_mm_srli_epi64(emm0, 32), 52);
347 __m256i e = _mm256_insertf128_si256(_mm256_setzero_si256(), lo, 0);
348 e = _mm256_insertf128_si256(e, hi, 1);
349
350 // Construct the result 2^n * exp(g) = e * x. The max is used to catch
351 // non-finite values in the input.
352 return pmax(pmul(x, _mm256_castsi256_pd(e)), _x);
353}
#define _EIGEN_DECLARE_CONST_Packet4d(NAME, X)
Definition PacketMath.h:42

References _EIGEN_DECLARE_CONST_Packet4d, _EIGEN_DECLARE_CONST_Packet4i, pmadd(), pmax(), pmin(), pmul(), and psub().

+ Here is the call graph for this function:

◆ pexp< Packet4f >() [1/4]

157{
158 Packet4f x = _x;
159
160 Packet4f tmp, fx;
161 Packet4i emm0;
162
163 // clamp x
164 x = pmax(pmin(x, p4f_exp_hi), p4f_exp_lo);
165
166 // express exp(x) as exp(g + n*log(2))
167 fx = pmadd(x, p4f_cephes_LOG2EF, p4f_half);
168
169 fx = pfloor(fx);
170
171 tmp = pmul(fx, p4f_cephes_exp_C1);
172 Packet4f z = pmul(fx, p4f_cephes_exp_C2);
173 x = psub(x, tmp);
174 x = psub(x, z);
175
176 z = pmul(x,x);
177
178 Packet4f y = p4f_cephes_exp_p0;
179 y = pmadd(y, x, p4f_cephes_exp_p1);
180 y = pmadd(y, x, p4f_cephes_exp_p2);
181 y = pmadd(y, x, p4f_cephes_exp_p3);
182 y = pmadd(y, x, p4f_cephes_exp_p4);
183 y = pmadd(y, x, p4f_cephes_exp_p5);
184 y = pmadd(y, z, x);
185 y = padd(y, p4f_1);
186
187 // build 2^n
188 emm0 = vec_cts(fx, 0);
189 emm0 = vec_add(emm0, p4i_0x7f);
190 emm0 = vec_sl(emm0, reinterpret_cast<Packet4ui>(p4i_23));
191
192 // Altivec's max & min operators just drop silent NaNs. Check NaNs in
193 // inputs and return them unmodified.
194 Packet4ui isnumber_mask = reinterpret_cast<Packet4ui>(vec_cmpeq(_x, _x));
195 return vec_sel(_x, pmax(pmul(y, reinterpret_cast<Packet4f>(emm0)), _x),
196 isnumber_mask);
197}
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet pfloor(const Packet &a)
Definition GenericPacketMath.h:432

References padd(), pfloor(), pmadd(), pmax(), pmin(), pmul(), psub(), and y.

+ Here is the call graph for this function:

◆ pexp< Packet4f >() [2/4]

21{
22 Packet4f x = _x;
23 Packet4f tmp, fx;
24
28 _EIGEN_DECLARE_CONST_Packet4f(exp_hi, 88.3762626647950f);
29 _EIGEN_DECLARE_CONST_Packet4f(exp_lo, -88.3762626647949f);
30 _EIGEN_DECLARE_CONST_Packet4f(cephes_LOG2EF, 1.44269504088896341f);
31 _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C1, 0.693359375f);
32 _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C2, -2.12194440e-4f);
33 _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p0, 1.9875691500E-4f);
34 _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p1, 1.3981999507E-3f);
35 _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p2, 8.3334519073E-3f);
36 _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p3, 4.1665795894E-2f);
37 _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459E-1f);
38 _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201E-1f);
39
40 x = vminq_f32(x, p4f_exp_hi);
41 x = vmaxq_f32(x, p4f_exp_lo);
42
43 /* express exp(x) as exp(g + n*log(2)) */
44 fx = vmlaq_f32(p4f_half, x, p4f_cephes_LOG2EF);
45
46 /* perform a floorf */
47 tmp = vcvtq_f32_s32(vcvtq_s32_f32(fx));
48
 49 /* if greater, subtract 1 */
50 Packet4ui mask = vcgtq_f32(tmp, fx);
51 mask = vandq_u32(mask, vreinterpretq_u32_f32(p4f_1));
52
53 fx = vsubq_f32(tmp, vreinterpretq_f32_u32(mask));
54
55 tmp = vmulq_f32(fx, p4f_cephes_exp_C1);
56 Packet4f z = vmulq_f32(fx, p4f_cephes_exp_C2);
57 x = vsubq_f32(x, tmp);
58 x = vsubq_f32(x, z);
59
60 Packet4f y = vmulq_f32(p4f_cephes_exp_p0, x);
61 z = vmulq_f32(x, x);
62 y = vaddq_f32(y, p4f_cephes_exp_p1);
63 y = vmulq_f32(y, x);
64 y = vaddq_f32(y, p4f_cephes_exp_p2);
65 y = vmulq_f32(y, x);
66 y = vaddq_f32(y, p4f_cephes_exp_p3);
67 y = vmulq_f32(y, x);
68 y = vaddq_f32(y, p4f_cephes_exp_p4);
69 y = vmulq_f32(y, x);
70 y = vaddq_f32(y, p4f_cephes_exp_p5);
71
72 y = vmulq_f32(y, z);
73 y = vaddq_f32(y, x);
74 y = vaddq_f32(y, p4f_1);
75
76 /* build 2^n */
77 int32x4_t mm;
78 mm = vcvtq_s32_f32(fx);
79 mm = vaddq_s32(mm, p4i_0x7f);
80 mm = vshlq_n_s32(mm, 23);
81 Packet4f pow2n = vreinterpretq_f32_s32(mm);
82
83 y = vmulq_f32(y, pow2n);
84 return y;
85}

References _EIGEN_DECLARE_CONST_Packet4f, _EIGEN_DECLARE_CONST_Packet4i, and y.

◆ pexp< Packet4f >() [3/4]

108{
109 Packet4f x = _x;
113
114
115 _EIGEN_DECLARE_CONST_Packet4f(exp_hi, 88.3762626647950f);
116 _EIGEN_DECLARE_CONST_Packet4f(exp_lo, -88.3762626647949f);
117
118 _EIGEN_DECLARE_CONST_Packet4f(cephes_LOG2EF, 1.44269504088896341f);
119 _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C1, 0.693359375f);
120 _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C2, -2.12194440e-4f);
121
122 _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p0, 1.9875691500E-4f);
123 _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p1, 1.3981999507E-3f);
124 _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p2, 8.3334519073E-3f);
125 _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p3, 4.1665795894E-2f);
126 _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459E-1f);
127 _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201E-1f);
128
129 Packet4f tmp, fx;
130 Packet4i emm0;
131
132 // clamp x
133 x = pmax(pmin(x, p4f_exp_hi), p4f_exp_lo);
134
135 /* express exp(x) as exp(g + n*log(2)) */
136 fx = pmadd(x, p4f_cephes_LOG2EF, p4f_half);
137
138#ifdef EIGEN_VECTORIZE_SSE4_1
139 fx = _mm_floor_ps(fx);
140#else
141 emm0 = _mm_cvttps_epi32(fx);
142 tmp = _mm_cvtepi32_ps(emm0);
 143 /* if greater, subtract 1 */
144 Packet4f mask = _mm_cmpgt_ps(tmp, fx);
145 mask = _mm_and_ps(mask, p4f_1);
146 fx = psub(tmp, mask);
147#endif
148
149 tmp = pmul(fx, p4f_cephes_exp_C1);
150 Packet4f z = pmul(fx, p4f_cephes_exp_C2);
151 x = psub(x, tmp);
152 x = psub(x, z);
153
154 z = pmul(x,x);
155
156 Packet4f y = p4f_cephes_exp_p0;
157 y = pmadd(y, x, p4f_cephes_exp_p1);
158 y = pmadd(y, x, p4f_cephes_exp_p2);
159 y = pmadd(y, x, p4f_cephes_exp_p3);
160 y = pmadd(y, x, p4f_cephes_exp_p4);
161 y = pmadd(y, x, p4f_cephes_exp_p5);
162 y = pmadd(y, z, x);
163 y = padd(y, p4f_1);
164
165 // build 2^n
166 emm0 = _mm_cvttps_epi32(fx);
167 emm0 = _mm_add_epi32(emm0, p4i_0x7f);
168 emm0 = _mm_slli_epi32(emm0, 23);
169 return pmax(pmul(y, Packet4f(_mm_castsi128_ps(emm0))), _x);
170}

References _EIGEN_DECLARE_CONST_Packet4f, _EIGEN_DECLARE_CONST_Packet4i, padd(), pmadd(), pmax(), pmin(), pmul(), psub(), and y.

+ Here is the call graph for this function:

◆ pexp< Packet4f >() [4/4]

97{
98 Packet4f res;
99 res.v4f[0] = pexp<Packet2d>(x.v4f[0]);
100 res.v4f[1] = pexp<Packet2d>(x.v4f[1]);
101 return res;
102}

References pexp< Packet2d >(), and Eigen::internal::Packet4f::v4f.

+ Here is the call graph for this function:

◆ pexp< Packet8f >()

209 {
213
214 _EIGEN_DECLARE_CONST_Packet8f(exp_hi, 88.3762626647950f);
215 _EIGEN_DECLARE_CONST_Packet8f(exp_lo, -88.3762626647949f);
216
217 _EIGEN_DECLARE_CONST_Packet8f(cephes_LOG2EF, 1.44269504088896341f);
218
219 _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p0, 1.9875691500E-4f);
220 _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p1, 1.3981999507E-3f);
221 _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p2, 8.3334519073E-3f);
222 _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p3, 4.1665795894E-2f);
223 _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p4, 1.6666665459E-1f);
224 _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p5, 5.0000001201E-1f);
225
226 // Clamp x.
227 Packet8f x = pmax(pmin(_x, p8f_exp_hi), p8f_exp_lo);
228
229 // Express exp(x) as exp(m*ln(2) + r), start by extracting
230 // m = floor(x/ln(2) + 0.5).
231 Packet8f m = _mm256_floor_ps(pmadd(x, p8f_cephes_LOG2EF, p8f_half));
232
233// Get r = x - m*ln(2). If no FMA instructions are available, m*ln(2) is
234// subtracted out in two parts, m*C1+m*C2 = m*ln(2), to avoid accumulating
235// truncation errors. Note that we don't use the "pmadd" function here to
236// ensure that a precision-preserving FMA instruction is used.
237#ifdef EIGEN_VECTORIZE_FMA
238 _EIGEN_DECLARE_CONST_Packet8f(nln2, -0.6931471805599453f);
239 Packet8f r = _mm256_fmadd_ps(m, p8f_nln2, x);
240#else
241 _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_C1, 0.693359375f);
242 _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_C2, -2.12194440e-4f);
243 Packet8f r = psub(x, pmul(m, p8f_cephes_exp_C1));
244 r = psub(r, pmul(m, p8f_cephes_exp_C2));
245#endif
246
247 Packet8f r2 = pmul(r, r);
248
249 // TODO(gonnet): Split into odd/even polynomials and try to exploit
250 // instruction-level parallelism.
251 Packet8f y = p8f_cephes_exp_p0;
252 y = pmadd(y, r, p8f_cephes_exp_p1);
253 y = pmadd(y, r, p8f_cephes_exp_p2);
254 y = pmadd(y, r, p8f_cephes_exp_p3);
255 y = pmadd(y, r, p8f_cephes_exp_p4);
256 y = pmadd(y, r, p8f_cephes_exp_p5);
257 y = pmadd(y, r2, r);
258 y = padd(y, p8f_1);
259
260 // Build emm0 = 2^m.
261 Packet8i emm0 = _mm256_cvttps_epi32(padd(m, p8f_127));
262 emm0 = pshiftleft(emm0, 23);
263
264 // Return 2^m * exp(r).
265 return pmax(pmul(y, _mm256_castsi256_ps(emm0)), _x);
266}
#define _EIGEN_DECLARE_CONST_Packet8f(NAME, X)
Definition PacketMath.h:39
Packet8i pshiftleft(Packet8i v, int n)
Definition MathFunctions.h:21
__vector short int Packet8i
Definition PacketMath.h:38

References _EIGEN_DECLARE_CONST_Packet8f, padd(), pmadd(), pmax(), pmin(), pmul(), pshiftleft(), psub(), and y.

+ Here is the call graph for this function:

◆ pfirst()

◆ pfirst< Packet16f >()

626 {
627 return _mm_cvtss_f32(_mm512_extractf32x4_ps(a, 0));
628}

◆ pfirst< Packet16i >()

634 {
635 return _mm_extract_epi32(_mm512_extracti32x4_epi32(a, 0), 0);
636}

◆ pfirst< Packet1cd >() [1/2]

template<>
EIGEN_STRONG_INLINE std::complex< double > Eigen::internal::pfirst< Packet1cd > ( const Packet1cd a)
330{
331 EIGEN_ALIGN16 double res[2];
332 _mm_store_pd(res, a.v);
333 return std::complex<double>(res[0],res[1]);
334}
#define EIGEN_ALIGN16
Definition Macros.h:753

References EIGEN_ALIGN16.

◆ pfirst< Packet1cd >() [2/2]

template<>
EIGEN_STRONG_INLINE std::complex< double > Eigen::internal::pfirst< Packet1cd > ( const Packet1cd a)
190{
191 std::complex<double> EIGEN_ALIGN16 res;
192 pstore<std::complex<double> >(&res, a);
193
194 return res;
195}

References EIGEN_ALIGN16.

◆ pfirst< Packet2cd >()

template<>
EIGEN_STRONG_INLINE std::complex< double > Eigen::internal::pfirst< Packet2cd > ( const Packet2cd a)
312{
313 __m128d low = _mm256_extractf128_pd(a.v, 0);
314 EIGEN_ALIGN16 double res[2];
315 _mm_store_pd(res, low);
316 return std::complex<double>(res[0],res[1]);
317}

References EIGEN_ALIGN16.

◆ pfirst< Packet2cf >() [1/4]

template<>
EIGEN_STRONG_INLINE std::complex< float > Eigen::internal::pfirst< Packet2cf > ( const Packet2cf a)
130{
131 std::complex<float> EIGEN_ALIGN16 res[2];
132 pstore((float *)&res, a.v);
133
134 return res[0];
135}
EIGEN_DEVICE_FUNC void pstore(Scalar *to, const Packet &from)
Definition GenericPacketMath.h:285

References EIGEN_ALIGN16, and pstore().

Referenced by predux< Packet2cf >(), and predux_mul< Packet2cf >().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ pfirst< Packet2cf >() [2/4]

template<>
EIGEN_STRONG_INLINE std::complex< float > Eigen::internal::pfirst< Packet2cf > ( const Packet2cf a)
148{
149 std::complex<float> EIGEN_ALIGN16 x[2];
150 vst1q_f32((float *)x, a.v);
151 return x[0];
152}

References EIGEN_ALIGN16.

◆ pfirst< Packet2cf >() [3/4]

template<>
EIGEN_STRONG_INLINE std::complex< float > Eigen::internal::pfirst< Packet2cf > ( const Packet2cf a)
134{
135 #if EIGEN_GNUC_AT_MOST(4,3)
136 // Workaround gcc 4.2 ICE - this is not performance wise ideal, but who cares...
137 // This workaround also fix invalid code generation with gcc 4.3
138 EIGEN_ALIGN16 std::complex<float> res[2];
139 _mm_store_ps((float*)res, a.v);
140 return res[0];
141 #else
142 std::complex<float> res;
143 _mm_storel_pi((__m64*)&res, a.v);
144 return res;
145 #endif
146}

References EIGEN_ALIGN16.

◆ pfirst< Packet2cf >() [4/4]

template<>
EIGEN_STRONG_INLINE std::complex< float > Eigen::internal::pfirst< Packet2cf > ( const Packet2cf a)
197{
198 std::complex<float> EIGEN_ALIGN16 res[2];
199 pstore<std::complex<float> >(res, a);
200
201 return res[0];
202}

References EIGEN_ALIGN16.

◆ pfirst< Packet2d >() [1/2]

437{ return _mm_cvtsd_f64(a); }

Referenced by predux< Packet2d >(), predux_max< Packet2d >(), predux_min< Packet2d >(), and predux_mul< Packet2d >().

+ Here is the caller graph for this function:

◆ pfirst< Packet2d >() [2/2]

686{ double EIGEN_ALIGN16 x[2]; pstore(x, a); return x[0]; }

References EIGEN_ALIGN16, and pstore().

+ Here is the call graph for this function:

◆ pfirst< Packet4cf >()

template<>
EIGEN_STRONG_INLINE std::complex< float > Eigen::internal::pfirst< Packet4cf > ( const Packet4cf a)
120{
121 return pfirst(Packet2cf(_mm256_castps256_ps128(a.v)));
122}
EIGEN_DEVICE_FUNC unpacket_traits< Packet >::type pfirst(const Packet &a)
Definition GenericPacketMath.h:315

References pfirst().

+ Here is the call graph for this function:

◆ pfirst< Packet4d >()

320 {
321 return _mm_cvtsd_f64(_mm256_castpd256_pd128(a));
322}

◆ pfirst< Packet4f >() [1/4]

537{ float EIGEN_ALIGN16 x; vec_ste(a, 0, &x); return x; }

References EIGEN_ALIGN16.

Referenced by predux< Packet4f >(), predux_max< Packet4f >(), predux_min< Packet4f >(), and predux_mul< Packet4f >().

+ Here is the caller graph for this function:

◆ pfirst< Packet4f >() [2/4]

342{ float EIGEN_ALIGN16 x[4]; vst1q_f32(x, a); return x[0]; }

References EIGEN_ALIGN16.

◆ pfirst< Packet4f >() [3/4]

436{ return _mm_cvtss_f32(a); }

◆ pfirst< Packet4f >() [4/4]

685{ float EIGEN_ALIGN16 x[2]; vec_st2f(a.v4f[0], &x[0]); return x[0]; }

References EIGEN_ALIGN16.

◆ pfirst< Packet4i >() [1/4]

538{ int EIGEN_ALIGN16 x; vec_ste(a, 0, &x); return x; }

References EIGEN_ALIGN16.

Referenced by predux< Packet4i >(), predux_max< Packet4i >(), and predux_min< Packet4i >().

+ Here is the caller graph for this function:

◆ pfirst< Packet4i >() [2/4]

343{ int32_t EIGEN_ALIGN16 x[4]; vst1q_s32(x, a); return x[0]; }
__int32 int32_t
Definition unistd.h:75

References EIGEN_ALIGN16.

◆ pfirst< Packet4i >() [3/4]

438{ return _mm_cvtsi128_si32(a); }

◆ pfirst< Packet4i >() [4/4]

684{ int EIGEN_ALIGN16 x[4]; pstore(x, a); return x[0]; }

References EIGEN_ALIGN16, and pstore().

+ Here is the call graph for this function:

◆ pfirst< Packet8d >()

630 {
631 return _mm_cvtsd_f64(_mm256_extractf128_pd(_mm512_extractf64x4_pd(a, 0), 0));
632}

◆ pfirst< Packet8f >()

317 {
318 return _mm_cvtss_f32(_mm256_castps256_ps128(a));
319}

◆ pfirst< Packet8i >()

323 {
324 return _mm_cvtsi128_si32(_mm256_castsi256_si128(a));
325}

◆ pfloor()

template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::pfloor ( const Packet a)
432{ using numext::floor; return floor(a); }
EIGEN_DEVICE_FUNC const FloorReturnType floor() const
Definition ArrayCwiseUnaryOps.h:388

References floor(), and Eigen::numext::floor().

Referenced by pexp< Packet4f >().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ pfloor< Packet2d >()

649{ return vec_floor(a); }

◆ pfloor< Packet4d >()

200{ return _mm256_floor_pd(a); }

◆ pfloor< Packet4f >() [1/2]

429{ return vec_floor(a); }

◆ pfloor< Packet4f >() [2/2]

643{
644 Packet4f res;
645 res.v4f[0] = vec_floor(a.v4f[0]);
646 res.v4f[1] = vec_floor(a.v4f[1]);
647 return res;
648}

References Eigen::internal::Packet4f::v4f.

◆ pfloor< Packet8f >()

199{ return _mm256_floor_ps(a); }

◆ pgather()

template<typename Scalar , typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pgather ( const Scalar *  from,
Index   
)
inline
293 { return ploadu<Packet>(from); }

◆ pgather< double, Packet2d >() [1/2]

template<>
EIGEN_DEVICE_FUNC Packet2d Eigen::internal::pgather< double, Packet2d > ( const double *  from,
Index  stride 
)
inline
371{
372 return _mm_set_pd(from[1*stride], from[0*stride]);
373}

◆ pgather< double, Packet2d >() [2/2]

template<>
EIGEN_DEVICE_FUNC Packet2d Eigen::internal::pgather< double, Packet2d > ( const double *  from,
Index  stride 
)
inline
463{
464 double EIGEN_ALIGN16 af[2];
465 af[0] = from[0*stride];
466 af[1] = from[1*stride];
467 return pload<Packet2d>(af);
468}

References EIGEN_ALIGN16, and pload< Packet2d >().

+ Here is the call graph for this function:

◆ pgather< double, Packet4d >()

template<>
EIGEN_DEVICE_FUNC Packet4d Eigen::internal::pgather< double, Packet4d > ( const double *  from,
Index  stride 
)
inline
267{
268 return _mm256_set_pd(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
269}

◆ pgather< double, Packet8d >()

template<>
EIGEN_DEVICE_FUNC Packet8d Eigen::internal::pgather< double, Packet8d > ( const double *  from,
Index  stride 
)
inline
577 {
578 Packet8i stride_vector = _mm256_set1_epi32(stride);
579 Packet8i stride_multiplier = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
580 Packet8i indices = _mm256_mullo_epi32(stride_vector, stride_multiplier);
581
582 return _mm512_i32gather_pd(indices, from, 8);
583}

◆ pgather< float, Packet16f >()

template<>
EIGEN_DEVICE_FUNC Packet16f Eigen::internal::pgather< float, Packet16f > ( const float *  from,
Index  stride 
)
inline
567 {
568 Packet16i stride_vector = _mm512_set1_epi32(stride);
569 Packet16i stride_multiplier =
570 _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
571 Packet16i indices = _mm512_mullo_epi32(stride_vector, stride_multiplier);
572
573 return _mm512_i32gather_ps(indices, from, 4);
574}
__m512i Packet16i
Definition PacketMath.h:32

◆ pgather< float, Packet4f >() [1/4]

template<>
EIGEN_DEVICE_FUNC Packet4f Eigen::internal::pgather< float, Packet4f > ( const float *  from,
Index  stride 
)
inline
310{
311 float EIGEN_ALIGN16 af[4];
312 af[0] = from[0*stride];
313 af[1] = from[1*stride];
314 af[2] = from[2*stride];
315 af[3] = from[3*stride];
316 return pload<Packet4f>(af);
317}

References EIGEN_ALIGN16, and pload< Packet4f >().

+ Here is the call graph for this function:

◆ pgather< float, Packet4f >() [2/4]

template<>
EIGEN_DEVICE_FUNC Packet4f Eigen::internal::pgather< float, Packet4f > ( const float *  from,
Index  stride 
)
inline
305{
306 Packet4f res = pset1<Packet4f>(0.f);
307 res = vsetq_lane_f32(from[0*stride], res, 0);
308 res = vsetq_lane_f32(from[1*stride], res, 1);
309 res = vsetq_lane_f32(from[2*stride], res, 2);
310 res = vsetq_lane_f32(from[3*stride], res, 3);
311 return res;
312}

References pset1< Packet4f >().

+ Here is the call graph for this function:

◆ pgather< float, Packet4f >() [3/4]

template<>
EIGEN_DEVICE_FUNC Packet4f Eigen::internal::pgather< float, Packet4f > ( const float *  from,
Index  stride 
)
inline
367{
368 return _mm_set_ps(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
369}

◆ pgather< float, Packet4f >() [4/4]

template<>
EIGEN_DEVICE_FUNC Packet4f Eigen::internal::pgather< float, Packet4f > ( const float *  from,
Index  stride 
)
inline
453{
454 float EIGEN_ALIGN16 ai[4];
455 ai[0] = from[0*stride];
456 ai[1] = from[1*stride];
457 ai[2] = from[2*stride];
458 ai[3] = from[3*stride];
459 return pload<Packet4f>(ai);
460}

References EIGEN_ALIGN16, and pload< Packet4f >().

+ Here is the call graph for this function:

◆ pgather< float, Packet8f >()

template<>
EIGEN_DEVICE_FUNC Packet8f Eigen::internal::pgather< float, Packet8f > ( const float *  from,
Index  stride 
)
inline
262{
263 return _mm256_set_ps(from[7*stride], from[6*stride], from[5*stride], from[4*stride],
264 from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
265}

◆ pgather< int, Packet4i >() [1/3]

template<>
EIGEN_DEVICE_FUNC Packet4i Eigen::internal::pgather< int, Packet4i > ( const int *  from,
Index  stride 
)
inline
319{
320 int EIGEN_ALIGN16 ai[4];
321 ai[0] = from[0*stride];
322 ai[1] = from[1*stride];
323 ai[2] = from[2*stride];
324 ai[3] = from[3*stride];
325 return pload<Packet4i>(ai);
326}

References EIGEN_ALIGN16, and pload< Packet4i >().

+ Here is the call graph for this function:

◆ pgather< int, Packet4i >() [2/3]

template<>
EIGEN_DEVICE_FUNC Packet4i Eigen::internal::pgather< int, Packet4i > ( const int *  from,
Index  stride 
)
inline
375{
376 return _mm_set_epi32(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
377 }

◆ pgather< int, Packet4i >() [3/3]

template<>
EIGEN_DEVICE_FUNC Packet4i Eigen::internal::pgather< int, Packet4i > ( const int *  from,
Index  stride 
)
inline
443{
444 int EIGEN_ALIGN16 ai[4];
445 ai[0] = from[0*stride];
446 ai[1] = from[1*stride];
447 ai[2] = from[2*stride];
448 ai[3] = from[3*stride];
449 return pload<Packet4i>(ai);
450}

References EIGEN_ALIGN16, and pload< Packet4i >().

+ Here is the call graph for this function:

◆ pgather< int32_t, Packet4i >()

template<>
EIGEN_DEVICE_FUNC Packet4i Eigen::internal::pgather< int32_t, Packet4i > ( const int32_t from,
Index  stride 
)
inline
314{
315 Packet4i res = pset1<Packet4i>(0);
316 res = vsetq_lane_s32(from[0*stride], res, 0);
317 res = vsetq_lane_s32(from[1*stride], res, 1);
318 res = vsetq_lane_s32(from[2*stride], res, 2);
319 res = vsetq_lane_s32(from[3*stride], res, 3);
320 return res;
321}

References pset1< Packet4i >().

+ Here is the call graph for this function:

◆ pgather< std::complex< double >, Packet1cd >()

template<>
EIGEN_DEVICE_FUNC Packet1cd Eigen::internal::pgather< std::complex< double >, Packet1cd > ( const std::complex< double > *  from,
Index stride  EIGEN_UNUSED 
)
inline
119{
120 return pload<Packet1cd>(from);
121}

References pload< Packet1cd >().

+ Here is the call graph for this function:

◆ pgather< std::complex< double >, Packet2cd >()

template<>
EIGEN_DEVICE_FUNC Packet2cd Eigen::internal::pgather< std::complex< double >, Packet2cd > ( const std::complex< double > *  from,
Index  stride 
)
inline
298{
299 return Packet2cd(_mm256_set_pd(std::imag(from[1*stride]), std::real(from[1*stride]),
300 std::imag(from[0*stride]), std::real(from[0*stride])));
301}

◆ pgather< std::complex< float >, Packet2cf >() [1/4]

template<>
EIGEN_DEVICE_FUNC Packet2cf Eigen::internal::pgather< std::complex< float >, Packet2cf > ( const std::complex< float > *  from,
Index  stride 
)
inline
84{
85 std::complex<float> EIGEN_ALIGN16 af[2];
86 af[0] = from[0*stride];
87 af[1] = from[1*stride];
88 return pload<Packet2cf>(af);
89}

References EIGEN_ALIGN16, and pload< Packet2cf >().

+ Here is the call graph for this function:

◆ pgather< std::complex< float >, Packet2cf >() [2/4]

template<>
EIGEN_DEVICE_FUNC Packet2cf Eigen::internal::pgather< std::complex< float >, Packet2cf > ( const std::complex< float > *  from,
Index  stride 
)
inline
130{
131 Packet4f res = pset1<Packet4f>(0.f);
132 res = vsetq_lane_f32(std::real(from[0*stride]), res, 0);
133 res = vsetq_lane_f32(std::imag(from[0*stride]), res, 1);
134 res = vsetq_lane_f32(std::real(from[1*stride]), res, 2);
135 res = vsetq_lane_f32(std::imag(from[1*stride]), res, 3);
136 return Packet2cf(res);
137}

References pset1< Packet4f >().

+ Here is the call graph for this function:

◆ pgather< std::complex< float >, Packet2cf >() [3/4]

template<>
EIGEN_DEVICE_FUNC Packet2cf Eigen::internal::pgather< std::complex< float >, Packet2cf > ( const std::complex< float > *  from,
Index  stride 
)
inline
118{
119 return Packet2cf(_mm_set_ps(std::imag(from[1*stride]), std::real(from[1*stride]),
120 std::imag(from[0*stride]), std::real(from[0*stride])));
121}

◆ pgather< std::complex< float >, Packet2cf >() [4/4]

template<>
EIGEN_DEVICE_FUNC Packet2cf Eigen::internal::pgather< std::complex< float >, Packet2cf > ( const std::complex< float > *  from,
Index  stride 
)
inline
112{
113 std::complex<float> EIGEN_ALIGN16 af[2];
114 af[0] = from[0*stride];
115 af[1] = from[1*stride];
116 return pload<Packet2cf>(af);
117}

References EIGEN_ALIGN16, and pload< Packet2cf >().

+ Here is the call graph for this function:

◆ pgather< std::complex< float >, Packet4cf >()

template<>
EIGEN_DEVICE_FUNC Packet4cf Eigen::internal::pgather< std::complex< float >, Packet4cf > ( const std::complex< float > *  from,
Index  stride 
)
inline
96{
97 return Packet4cf(_mm256_set_ps(std::imag(from[3*stride]), std::real(from[3*stride]),
98 std::imag(from[2*stride]), std::real(from[2*stride]),
99 std::imag(from[1*stride]), std::real(from[1*stride]),
100 std::imag(from[0*stride]), std::real(from[0*stride])));
101}

◆ pinsertfirst() [1/9]

template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pinsertfirst ( const Packet a,
typename unpacket_traits< Packet >::type  b 
)
inline
564{
565 // Default implementation based on pblend.
566 // It must be specialized for higher performance.
 567  Selector<unpacket_traits<Packet>::size> mask;
 568  mask.select[0] = true;
569 // This for loop should be optimized away by the compiler.
570 for(Index i=1; i<unpacket_traits<Packet>::size; ++i)
571 mask.select[i] = false;
572 return pblend(mask, pset1<Packet>(b), a);
573}
EIGEN_STRONG_INLINE Packet4i pblend(const Selector< 4 > &ifPacket, const Packet4i &thenPacket, const Packet4i &elsePacket)
Definition PacketMath.h:759

References pblend(), and Eigen::internal::Selector< N >::select.

+ Here is the call graph for this function:

◆ pinsertfirst() [2/9]

template<>
EIGEN_STRONG_INLINE Packet1cd Eigen::internal::pinsertfirst ( const Packet1cd ,
std::complex< double >  b 
)
453{
454 return pset1<Packet1cd>(b);
455}

References pset1< Packet1cd >().

+ Here is the call graph for this function:

◆ pinsertfirst() [3/9]

template<>
EIGEN_STRONG_INLINE Packet2cd Eigen::internal::pinsertfirst ( const Packet2cd a,
std::complex< double >  b 
)
433{
434 return Packet2cd(_mm256_blend_pd(a.v,pset1<Packet2cd>(b).v,1|2));
435}

References pset1< Packet2cd >(), and Eigen::internal::Packet2cd::v.

+ Here is the call graph for this function:

◆ pinsertfirst() [4/9]

template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::pinsertfirst ( const Packet2cf a,
std::complex< float >  b 
)
448{
449 return Packet2cf(_mm_loadl_pi(a.v, reinterpret_cast<const __m64*>(&b)));
450}

◆ pinsertfirst() [5/9]

template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::pinsertfirst ( const Packet2d a,
double  b 
)
843{
844#ifdef EIGEN_VECTORIZE_SSE4_1
845 return _mm_blend_pd(a,pset1<Packet2d>(b),1);
846#else
847 return _mm_move_sd(a, _mm_load_sd(&b));
848#endif
849}

References pset1< Packet2d >().

+ Here is the call graph for this function:

◆ pinsertfirst() [6/9]

template<>
EIGEN_STRONG_INLINE Packet4cf Eigen::internal::pinsertfirst ( const Packet4cf a,
std::complex< float >  b 
)
428{
429 return Packet4cf(_mm256_blend_ps(a.v,pset1<Packet4cf>(b).v,1|2));
430}

References pset1< Packet4cf >(), and Eigen::internal::Packet4cf::v.

Referenced by Eigen::internal::linspaced_op_impl< Scalar, Packet, false >::packetOp().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ pinsertfirst() [7/9]

template<>
EIGEN_STRONG_INLINE Packet4d Eigen::internal::pinsertfirst ( const Packet4d a,
double  b 
)
619{
620 return _mm256_blend_pd(a,pset1<Packet4d>(b),1);
621}

References pset1< Packet4d >().

+ Here is the call graph for this function:

◆ pinsertfirst() [8/9]

template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pinsertfirst ( const Packet4f a,
float  b 
)
834{
835#ifdef EIGEN_VECTORIZE_SSE4_1
836 return _mm_blend_ps(a,pset1<Packet4f>(b),1);
837#else
838 return _mm_move_ss(a, _mm_load_ss(&b));
839#endif
840}

References pset1< Packet4f >().

+ Here is the call graph for this function:

◆ pinsertfirst() [9/9]

template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::pinsertfirst ( const Packet8f a,
float  b 
)
614{
615 return _mm256_blend_ps(a,pset1<Packet8f>(b),1);
616}

References pset1< Packet8f >().

+ Here is the call graph for this function:

◆ pinsertlast() [1/9]

template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pinsertlast ( const Packet a,
typename unpacket_traits< Packet >::type  b 
)
inline
578{
579 // Default implementation based on pblend.
580 // It must be specialized for higher performance.
 581  Selector<unpacket_traits<Packet>::size> mask;
 582  // This for loop should be optimized away by the compiler.
 583  for(Index i=0; i<unpacket_traits<Packet>::size-1; ++i)
 584  mask.select[i] = false;
 585  mask.select[unpacket_traits<Packet>::size-1] = true;
 586  return pblend(mask, pset1<Packet>(b), a);
587}

References pblend(), and Eigen::internal::Selector< N >::select.

+ Here is the call graph for this function:

◆ pinsertlast() [2/9]

template<>
EIGEN_STRONG_INLINE Packet1cd Eigen::internal::pinsertlast ( const Packet1cd ,
std::complex< double >  b 
)
463{
464 return pset1<Packet1cd>(b);
465}

References pset1< Packet1cd >().

+ Here is the call graph for this function:

◆ pinsertlast() [3/9]

template<>
EIGEN_STRONG_INLINE Packet2cd Eigen::internal::pinsertlast ( const Packet2cd a,
std::complex< double >  b 
)
443{
444 return Packet2cd(_mm256_blend_pd(a.v,pset1<Packet2cd>(b).v,(1<<3)|(1<<2)));
445}

References pset1< Packet2cd >(), and Eigen::internal::Packet2cd::v.

+ Here is the call graph for this function:

◆ pinsertlast() [4/9]

template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::pinsertlast ( const Packet2cf a,
std::complex< float >  b 
)
458{
459 return Packet2cf(_mm_loadh_pi(a.v, reinterpret_cast<const __m64*>(&b)));
460}

◆ pinsertlast() [5/9]

template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::pinsertlast ( const Packet2d a,
double  b 
)
862{
863#ifdef EIGEN_VECTORIZE_SSE4_1
864 return _mm_blend_pd(a,pset1<Packet2d>(b),(1<<1));
865#else
866 const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x0,0xFFFFFFFF,0xFFFFFFFF));
867 return _mm_or_pd(_mm_andnot_pd(mask, a), _mm_and_pd(mask, pset1<Packet2d>(b)));
868#endif
869}

References pset1< Packet2d >().

+ Here is the call graph for this function:

◆ pinsertlast() [6/9]

template<>
EIGEN_STRONG_INLINE Packet4cf Eigen::internal::pinsertlast ( const Packet4cf a,
std::complex< float >  b 
)
438{
439 return Packet4cf(_mm256_blend_ps(a.v,pset1<Packet4cf>(b).v,(1<<7)|(1<<6)));
440}

References pset1< Packet4cf >(), and Eigen::internal::Packet4cf::v.

Referenced by Eigen::internal::linspaced_op_impl< Scalar, Packet, false >::packetOp().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ pinsertlast() [7/9]

template<>
EIGEN_STRONG_INLINE Packet4d Eigen::internal::pinsertlast ( const Packet4d a,
double  b 
)
629{
630 return _mm256_blend_pd(a,pset1<Packet4d>(b),(1<<3));
631}

References pset1< Packet4d >().

+ Here is the call graph for this function:

◆ pinsertlast() [8/9]

template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pinsertlast ( const Packet4f a,
float  b 
)
852{
853#ifdef EIGEN_VECTORIZE_SSE4_1
854 return _mm_blend_ps(a,pset1<Packet4f>(b),(1<<3));
855#else
856 const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x0,0x0,0x0,0xFFFFFFFF));
857 return _mm_or_ps(_mm_andnot_ps(mask, a), _mm_and_ps(mask, pset1<Packet4f>(b)));
858#endif
859}

References pset1< Packet4f >().

+ Here is the call graph for this function:

◆ pinsertlast() [9/9]

template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::pinsertlast ( const Packet8f a,
float  b 
)
624{
625 return _mm256_blend_ps(a,pset1<Packet8f>(b),(1<<7));
626}

References pset1< Packet8f >().

+ Here is the call graph for this function:

◆ pload()

template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pload ( const typename unpacket_traits< Packet >::type *  from)
inline
214{ return *from; }

◆ pload1()

template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pload1 ( const typename unpacket_traits< Packet >::type *  a)
inline
226{ return pset1<Packet>(*a); }

◆ pload1< Packet16f >()

130 {
131 return _mm512_broadcastss_ps(_mm_load_ps1(from));
132}

◆ pload1< Packet4d >()

125{ return _mm256_broadcast_sd(from); }

◆ pload1< Packet8d >()

134 {
135 return _mm512_broadcastsd_pd(_mm_load_pd1(from));
136}

◆ pload1< Packet8f >()

124{ return _mm256_broadcast_ss(from); }

◆ pload< Packet16f >()

433 {
434 EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_ps(from);
435}
#define EIGEN_DEBUG_ALIGNED_LOAD
Definition GenericPacketMath.h:27

References EIGEN_DEBUG_ALIGNED_LOAD.

◆ pload< Packet16i >()

441 {
442 EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_si512(
443 reinterpret_cast<const __m512i*>(from));
444}

References EIGEN_DEBUG_ALIGNED_LOAD.

◆ pload< Packet1cd >() [1/2]

template<>
EIGEN_STRONG_INLINE Packet1cd Eigen::internal::pload< Packet1cd > ( const std::complex< double > *  from)
315{ EIGEN_DEBUG_ALIGNED_LOAD return Packet1cd(pload<Packet2d>((const double*)from)); }

References EIGEN_DEBUG_ALIGNED_LOAD, and pload< Packet2d >().

Referenced by pgather< std::complex< double >, Packet1cd >().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ pload< Packet1cd >() [2/2]

template<>
EIGEN_STRONG_INLINE Packet1cd Eigen::internal::pload< Packet1cd > ( const std::complex< double > *  from)
93{ EIGEN_DEBUG_ALIGNED_LOAD return Packet1cd(pload<Packet2d>((const double*)from)); }

References EIGEN_DEBUG_ALIGNED_LOAD, and pload< Packet2d >().

+ Here is the call graph for this function:

◆ pload< Packet2cd >()

template<>
EIGEN_STRONG_INLINE Packet2cd Eigen::internal::pload< Packet2cd > ( const std::complex< double > *  from)
281{ EIGEN_DEBUG_ALIGNED_LOAD return Packet2cd(pload<Packet4d>((const double*)from)); }

References EIGEN_DEBUG_ALIGNED_LOAD, and pload< Packet4d >().

+ Here is the call graph for this function:

◆ pload< Packet2cf >() [1/4]

template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::pload< Packet2cf > ( const std::complex< float > *  from)
76{ return Packet2cf(pload<Packet4f>((const float *) from)); }

References pload< Packet4f >().

Referenced by pgather< std::complex< float >, Packet2cf >().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ pload< Packet2cf >() [2/4]

template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::pload< Packet2cf > ( const std::complex< float > *  from)
121{ EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>((const float*)from)); }

References EIGEN_DEBUG_ALIGNED_LOAD, and pload< Packet4f >().

+ Here is the call graph for this function:

◆ pload< Packet2cf >() [3/4]

template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::pload< Packet2cf > ( const std::complex< float > *  from)
90{ EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>(&numext::real_ref(*from))); }

References EIGEN_DEBUG_ALIGNED_LOAD, pload< Packet4f >(), and Eigen::numext::real_ref().

+ Here is the call graph for this function:

◆ pload< Packet2cf >() [4/4]

template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::pload< Packet2cf > ( const std::complex< float > *  from)
92{ EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>((const float*)from)); }

References EIGEN_DEBUG_ALIGNED_LOAD, and pload< Packet4f >().

+ Here is the call graph for this function:

◆ pload< Packet2d >() [1/2]

307{ EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_pd(from); }

References EIGEN_DEBUG_ALIGNED_LOAD.

Referenced by pbroadcast4< Packet2d >(), pgather< double, Packet2d >(), pload< Packet1cd >(), ploaddup< Packet2d >(), and ploadu< Packet2d >().

+ Here is the caller graph for this function:

◆ pload< Packet2d >() [2/2]

358{
 359 // FIXME: No intrinsic yet
 360 EIGEN_DEBUG_ALIGNED_LOAD
361 Packet *vfrom;
362 vfrom = (Packet *) from;
363 return vfrom->v2d;
364}

References EIGEN_DEBUG_ALIGNED_LOAD, and Eigen::internal::Packet::v2d.

◆ pload< Packet4cf >()

template<>
EIGEN_STRONG_INLINE Packet4cf Eigen::internal::pload< Packet4cf > ( const std::complex< float > *  from)
75{ EIGEN_DEBUG_ALIGNED_LOAD return Packet4cf(pload<Packet8f>(&numext::real_ref(*from))); }

References EIGEN_DEBUG_ALIGNED_LOAD, pload< Packet8f >(), and Eigen::numext::real_ref().

+ Here is the call graph for this function:

◆ pload< Packet4d >()

215{ EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_pd(from); }

References EIGEN_DEBUG_ALIGNED_LOAD.

Referenced by pload< Packet2cd >().

+ Here is the caller graph for this function:

◆ pload< Packet4f >() [1/4]

240{
 241 EIGEN_DEBUG_ALIGNED_LOAD
242#ifdef __VSX__
243 return vec_vsx_ld(0, from);
244#else
245 return vec_ld(0, from);
246#endif
247}

References EIGEN_DEBUG_ALIGNED_LOAD.

Referenced by pbroadcast4< Packet4f >(), pgather< float, Packet4f >(), pload< Packet2cf >(), ploaddup< Packet4f >(), ploadu< Packet4f >(), and pset1< Packet2cf >().

+ Here is the caller graph for this function:

◆ pload< Packet4f >() [2/4]

277{ EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f32(from); }

References EIGEN_DEBUG_ALIGNED_LOAD.

◆ pload< Packet4f >() [3/4]

306{ EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_ps(from); }

References EIGEN_DEBUG_ALIGNED_LOAD.

◆ pload< Packet4f >() [4/4]

348{
 349 // FIXME: No intrinsic yet
 350 EIGEN_DEBUG_ALIGNED_LOAD
351 Packet4f vfrom;
352 vfrom.v4f[0] = vec_ld2f(&from[0]);
353 vfrom.v4f[1] = vec_ld2f(&from[2]);
354 return vfrom;
355}

References EIGEN_DEBUG_ALIGNED_LOAD, and Eigen::internal::Packet4f::v4f.

◆ pload< Packet4i >() [1/4]

250{
 251 EIGEN_DEBUG_ALIGNED_LOAD
252#ifdef __VSX__
253 return vec_vsx_ld(0, from);
254#else
255 return vec_ld(0, from);
256#endif
257}

References EIGEN_DEBUG_ALIGNED_LOAD.

Referenced by pbroadcast4< Packet4i >(), pgather< int, Packet4i >(), ploaddup< Packet4i >(), and ploadu< Packet4i >().

+ Here is the caller graph for this function:

◆ pload< Packet4i >() [2/4]

308{ EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(reinterpret_cast<const __m128i*>(from)); }

References EIGEN_DEBUG_ALIGNED_LOAD.

◆ pload< Packet4i >() [3/4]

339{
 340 // FIXME: No intrinsic yet
 341 EIGEN_DEBUG_ALIGNED_LOAD
342 Packet *vfrom;
343 vfrom = (Packet *) from;
344 return vfrom->v4i;
345}
Packet4i v4i
Definition PacketMath.h:54

References EIGEN_DEBUG_ALIGNED_LOAD, and Eigen::internal::Packet::v4i.

◆ pload< Packet4i >() [4/4]

278{ EIGEN_DEBUG_ALIGNED_LOAD return vld1q_s32(from); }

References EIGEN_DEBUG_ALIGNED_LOAD.

◆ pload< Packet8d >()

437 {
438 EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_pd(from);
439}

References EIGEN_DEBUG_ALIGNED_LOAD.

◆ pload< Packet8f >()

214{ EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_ps(from); }

References EIGEN_DEBUG_ALIGNED_LOAD.

Referenced by pload< Packet4cf >().

+ Here is the caller graph for this function:

◆ pload< Packet8i >()

216{ EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256(reinterpret_cast<const __m256i*>(from)); }

References EIGEN_DEBUG_ALIGNED_LOAD.

◆ ploaddup()

template<typename Packet >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet Eigen::internal::ploaddup ( const typename unpacket_traits< Packet >::type *  from)
234{ return *from; }

◆ ploaddup< Packet16f >()

463 {
464 Packet8f lane0 = _mm256_broadcast_ps((const __m128*)(const void*)from);
465 // mimic an "inplace" permutation of the lower 128bits using a blend
466 lane0 = _mm256_blend_ps(
467 lane0, _mm256_castps128_ps256(_mm_permute_ps(
468 _mm256_castps256_ps128(lane0), _MM_SHUFFLE(1, 0, 1, 0))),
469 15);
470 // then we can perform a consistent permutation on the global register to get
471 // everything in shape:
472 lane0 = _mm256_permute_ps(lane0, _MM_SHUFFLE(3, 3, 2, 2));
473
474 Packet8f lane1 = _mm256_broadcast_ps((const __m128*)(const void*)(from + 4));
475 // mimic an "inplace" permutation of the lower 128bits using a blend
476 lane1 = _mm256_blend_ps(
477 lane1, _mm256_castps128_ps256(_mm_permute_ps(
478 _mm256_castps256_ps128(lane1), _MM_SHUFFLE(1, 0, 1, 0))),
479 15);
480 // then we can perform a consistent permutation on the global register to get
481 // everything in shape:
482 lane1 = _mm256_permute_ps(lane1, _MM_SHUFFLE(3, 3, 2, 2));
483
484#ifdef EIGEN_VECTORIZE_AVX512DQ
485 Packet16f res = _mm512_undefined_ps();
486 return _mm512_insertf32x8(res, lane0, 0);
487 return _mm512_insertf32x8(res, lane1, 1);
488 return res;
489#else
490 Packet16f res = _mm512_undefined_ps();
491 res = _mm512_insertf32x4(res, _mm256_extractf128_ps(lane0, 0), 0);
492 res = _mm512_insertf32x4(res, _mm256_extractf128_ps(lane0, 1), 1);
493 res = _mm512_insertf32x4(res, _mm256_extractf128_ps(lane1, 0), 2);
494 res = _mm512_insertf32x4(res, _mm256_extractf128_ps(lane1, 1), 3);
495 return res;
496#endif
497}

◆ ploaddup< Packet1cd >() [1/2]

template<>
EIGEN_STRONG_INLINE Packet1cd Eigen::internal::ploaddup< Packet1cd > ( const std::complex< double > *  from)
321{ return pset1<Packet1cd>(*from); }

References pset1< Packet1cd >().

+ Here is the call graph for this function:

◆ ploaddup< Packet1cd >() [2/2]

template<>
EIGEN_STRONG_INLINE Packet1cd Eigen::internal::ploaddup< Packet1cd > ( const std::complex< double > *  from)
183{ return pset1<Packet1cd>(*from); }

References pset1< Packet1cd >().

+ Here is the call graph for this function:

◆ ploaddup< Packet2cd >()

template<>
EIGEN_STRONG_INLINE Packet2cd Eigen::internal::ploaddup< Packet2cd > ( const std::complex< double > *  from)
292{ return pset1<Packet2cd>(*from); }

References pset1< Packet2cd >().

+ Here is the call graph for this function:

◆ ploaddup< Packet2cf >() [1/4]

template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::ploaddup< Packet2cf > ( const std::complex< float > *  from)
78{ return pset1<Packet2cf>(*from); }

References pset1< Packet2cf >().

Referenced by ploaddup< Packet4cf >().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ ploaddup< Packet2cf >() [2/4]

template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::ploaddup< Packet2cf > ( const std::complex< float > *  from)
124{ return pset1<Packet2cf>(*from); }

References pset1< Packet2cf >().

+ Here is the call graph for this function:

◆ ploaddup< Packet2cf >() [3/4]

template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::ploaddup< Packet2cf > ( const std::complex< float > *  from)
111{ return pset1<Packet2cf>(*from); }

References pset1< Packet2cf >().

+ Here is the call graph for this function:

◆ ploaddup< Packet2cf >() [4/4]

template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::ploaddup< Packet2cf > ( const std::complex< float > *  from)
184{ return pset1<Packet2cf>(*from); }

References pset1< Packet2cf >().

+ Here is the call graph for this function:

◆ ploaddup< Packet2d >() [1/2]

350{ return pset1<Packet2d>(from[0]); }

References pset1< Packet2d >().

+ Here is the call graph for this function:

◆ ploaddup< Packet2d >() [2/2]

671{
672 Packet2d p = pload<Packet2d>(from);
673 return vec_perm(p, p, p16uc_PSET64_HI);
674}

References p16uc_PSET64_HI, and pload< Packet2d >().

+ Here is the call graph for this function:

◆ ploaddup< Packet4cf >()

template<>
EIGEN_STRONG_INLINE Packet4cf Eigen::internal::ploaddup< Packet4cf > ( const std::complex< float > *  from)
85{
86 // FIXME The following might be optimized using _mm256_movedup_pd
87 Packet2cf a = ploaddup<Packet2cf>(from);
88 Packet2cf b = ploaddup<Packet2cf>(from+1);
89 return Packet4cf(_mm256_insertf128_ps(_mm256_castps128_ps256(a.v), b.v, 1));
90}

References ploaddup< Packet2cf >().

+ Here is the call graph for this function:

◆ ploaddup< Packet4d >()

239{
240 Packet4d tmp = _mm256_broadcast_pd((const __m128d*)(const void*)from);
241 return _mm256_permute_pd(tmp, 3<<2);
242}

◆ ploaddup< Packet4f >() [1/4]

469{
470 Packet4f p;
471 if((std::ptrdiff_t(from) % 16) == 0) p = pload<Packet4f>(from);
472 else p = ploadu<Packet4f>(from);
473 return vec_perm(p, p, p16uc_DUPLICATE32_HI);
474}

References p16uc_DUPLICATE32_HI, pload< Packet4f >(), and ploadu< Packet4f >().

+ Here is the call graph for this function:

◆ ploaddup< Packet4f >() [2/4]

284{
285 float32x2_t lo, hi;
286 lo = vld1_dup_f32(from);
287 hi = vld1_dup_f32(from+1);
288 return vcombine_f32(lo, hi);
289}

◆ ploaddup< Packet4f >() [3/4]

346{
347 return vec4f_swizzle1(_mm_castpd_ps(_mm_load_sd(reinterpret_cast<const double*>(from))), 0, 0, 1, 1);
348}

References vec4f_swizzle1.

◆ ploaddup< Packet4f >() [4/4]

663{
664 Packet4f p = pload<Packet4f>(from);
665 p.v4f[1] = vec_splat(p.v4f[0], 1);
666 p.v4f[0] = vec_splat(p.v4f[0], 0);
667 return p;
668}

References pload< Packet4f >(), and Eigen::internal::Packet4f::v4f.

+ Here is the call graph for this function:

◆ ploaddup< Packet4i >() [1/4]

476{
477 Packet4i p;
478 if((std::ptrdiff_t(from) % 16) == 0) p = pload<Packet4i>(from);
479 else p = ploadu<Packet4i>(from);
480 return vec_perm(p, p, p16uc_DUPLICATE32_HI);
481}

References p16uc_DUPLICATE32_HI, pload< Packet4i >(), and ploadu< Packet4i >().

+ Here is the call graph for this function:

◆ ploaddup< Packet4i >() [2/4]

352{
353 Packet4i tmp;
354 tmp = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(from));
355 return vec4i_swizzle1(tmp, 0, 0, 1, 1);
356}
#define vec4i_swizzle1(v, p, q, r, s)
Definition PacketMath.h:67

References vec4i_swizzle1.

◆ ploaddup< Packet4i >() [3/4]

657{
658 Packet4i p = pload<Packet4i>(from);
659 return vec_perm(p, p, p16uc_DUPLICATE32_HI);
660}

References p16uc_DUPLICATE32_HI, and pload< Packet4i >().

+ Here is the call graph for this function:

◆ ploaddup< Packet4i >() [4/4]

291{
292 int32x2_t lo, hi;
293 lo = vld1_dup_s32(from);
294 hi = vld1_dup_s32(from+1);
295 return vcombine_s32(lo, hi);
296}

◆ ploaddup< Packet8d >()

501 {
502 Packet4d lane0 = _mm256_broadcast_pd((const __m128d*)(const void*)from);
503 lane0 = _mm256_permute_pd(lane0, 3 << 2);
504
505 Packet4d lane1 = _mm256_broadcast_pd((const __m128d*)(const void*)(from + 2));
506 lane1 = _mm256_permute_pd(lane1, 3 << 2);
507
508 Packet8d res = _mm512_undefined_pd();
509 res = _mm512_insertf64x4(res, lane0, 0);
510 return _mm512_insertf64x4(res, lane1, 1);
511}

◆ ploaddup< Packet8f >()

224{
225 // TODO try to find a way to avoid the need of a temporary register
226// Packet8f tmp = _mm256_castps128_ps256(_mm_loadu_ps(from));
227// tmp = _mm256_insertf128_ps(tmp, _mm_movehl_ps(_mm256_castps256_ps128(tmp),_mm256_castps256_ps128(tmp)), 1);
228// return _mm256_unpacklo_ps(tmp,tmp);
229
230 // _mm256_insertf128_ps is very slow on Haswell, thus:
231 Packet8f tmp = _mm256_broadcast_ps((const __m128*)(const void*)from);
232 // mimic an "inplace" permutation of the lower 128bits using a blend
233 tmp = _mm256_blend_ps(tmp,_mm256_castps128_ps256(_mm_permute_ps( _mm256_castps256_ps128(tmp), _MM_SHUFFLE(1,0,1,0))), 15);
234 // then we can perform a consistent permutation on the global register to get everything in shape:
235 return _mm256_permute_ps(tmp, _MM_SHUFFLE(3,3,2,2));
236}

◆ ploadquad()

template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::ploadquad ( const typename unpacket_traits< Packet >::type *  from)
inline
244{ return pload1<Packet>(from); }

◆ ploadquad< Packet16f >()

516 {
517 Packet16f tmp = _mm512_undefined_ps();
518 tmp = _mm512_insertf32x4(tmp, _mm_load_ps1(from), 0);
519 tmp = _mm512_insertf32x4(tmp, _mm_load_ps1(from + 1), 1);
520 tmp = _mm512_insertf32x4(tmp, _mm_load_ps1(from + 2), 2);
521 tmp = _mm512_insertf32x4(tmp, _mm_load_ps1(from + 3), 3);
522 return tmp;
523}

◆ ploadquad< Packet8d >()

527 {
528 Packet8d tmp = _mm512_undefined_pd();
529 Packet2d tmp0 = _mm_load_pd1(from);
530 Packet2d tmp1 = _mm_load_pd1(from + 1);
531 Packet4d lane0 = _mm256_broadcastsd_pd(tmp0);
532 Packet4d lane1 = _mm256_broadcastsd_pd(tmp1);
533 tmp = _mm512_insertf64x4(tmp, lane0, 0);
534 return _mm512_insertf64x4(tmp, lane1, 1);
535}

◆ ploadquad< Packet8f >()

246{
247 Packet8f tmp = _mm256_castps128_ps256(_mm_broadcast_ss(from));
248 return _mm256_insertf128_ps(tmp, _mm_broadcast_ss(from+1), 1);
249}

◆ ploadt()

template<typename Packet , int Alignment>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet Eigen::internal::ploadt ( const typename unpacket_traits< Packet >::type *  from)
461{
463 return pload<Packet>(from);
464 else
465 return ploadu<Packet>(from);
466}

◆ ploadt_ro()

template<typename Packet , int LoadMode>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet Eigen::internal::ploadt_ro ( const typename unpacket_traits< Packet >::type *  from)
486{
487 return ploadt<Packet, LoadMode>(from);
488}

◆ ploadu()

template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::ploadu ( const typename unpacket_traits< Packet >::type *  from)
inline
218{ return *from; }

◆ ploadu< Packet16f >()

447 {
448 EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_ps(from);
449}
#define EIGEN_DEBUG_UNALIGNED_LOAD
Definition GenericPacketMath.h:31

References EIGEN_DEBUG_UNALIGNED_LOAD.

◆ ploadu< Packet16i >()

455 {
456 EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_si512(
457 reinterpret_cast<const __m512i*>(from));
458}

References EIGEN_DEBUG_UNALIGNED_LOAD.

◆ ploadu< Packet1cd >() [1/2]

template<>
EIGEN_STRONG_INLINE Packet1cd Eigen::internal::ploadu< Packet1cd > ( const std::complex< double > *  from)
317{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cd(ploadu<Packet2d>((const double*)from)); }

References EIGEN_DEBUG_UNALIGNED_LOAD, and ploadu< Packet2d >().

Referenced by pset1< Packet1cd >().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ ploadu< Packet1cd >() [2/2]

template<>
EIGEN_STRONG_INLINE Packet1cd Eigen::internal::ploadu< Packet1cd > ( const std::complex< double > *  from)
95{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cd(ploadu<Packet2d>((const double*)from)); }

References EIGEN_DEBUG_UNALIGNED_LOAD, and ploadu< Packet2d >().

+ Here is the call graph for this function:

◆ ploadu< Packet2cd >()

template<>
EIGEN_STRONG_INLINE Packet2cd Eigen::internal::ploadu< Packet2cd > ( const std::complex< double > *  from)
283{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cd(ploadu<Packet4d>((const double*)from)); }

References EIGEN_DEBUG_UNALIGNED_LOAD, and ploadu< Packet4d >().

+ Here is the call graph for this function:

◆ ploadu< Packet2cf >() [1/4]

template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::ploadu< Packet2cf > ( const std::complex< float > *  from)
77{ return Packet2cf(ploadu<Packet4f>((const float*) from)); }

References ploadu< Packet4f >().

+ Here is the call graph for this function:

◆ ploadu< Packet2cf >() [2/4]

template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::ploadu< Packet2cf > ( const std::complex< float > *  from)
122{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>((const float*)from)); }

References EIGEN_DEBUG_UNALIGNED_LOAD, and ploadu< Packet4f >().

+ Here is the call graph for this function:

◆ ploadu< Packet2cf >() [3/4]

template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::ploadu< Packet2cf > ( const std::complex< float > *  from)
91{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>(&numext::real_ref(*from))); }

References EIGEN_DEBUG_UNALIGNED_LOAD, ploadu< Packet4f >(), and Eigen::numext::real_ref().

+ Here is the call graph for this function:

◆ ploadu< Packet2cf >() [4/4]

template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::ploadu< Packet2cf > ( const std::complex< float > *  from)
94{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>((const float*)from)); }

References EIGEN_DEBUG_UNALIGNED_LOAD, and ploadu< Packet4f >().

+ Here is the call graph for this function:

◆ ploadu< Packet2d >() [1/2]

334{
336 return _mm_loadu_pd(from);
337}

References EIGEN_DEBUG_UNALIGNED_LOAD.

Referenced by ploadu< Packet1cd >().

+ Here is the caller graph for this function:

◆ ploadu< Packet2d >() [2/2]

653{ return pload<Packet2d>(from); }

References pload< Packet2d >().

+ Here is the call graph for this function:

◆ ploadu< Packet4cf >()

template<>
EIGEN_STRONG_INLINE Packet4cf Eigen::internal::ploadu< Packet4cf > ( const std::complex< float > *  from)
76{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet4cf(ploadu<Packet8f>(&numext::real_ref(*from))); }

References EIGEN_DEBUG_UNALIGNED_LOAD, ploadu< Packet8f >(), and Eigen::numext::real_ref().

+ Here is the call graph for this function:

◆ ploadu< Packet4d >()

219{ EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_pd(from); }

References EIGEN_DEBUG_UNALIGNED_LOAD.

Referenced by ploadu< Packet2cd >().

+ Here is the caller graph for this function:

◆ ploadu< Packet4f >() [1/4]

462{
464 return (Packet4f) vec_vsx_ld((long)from & 15, (const float*) _EIGEN_ALIGNED_PTR(from));
465}
#define _EIGEN_ALIGNED_PTR(x)
Definition PacketMath.h:93

References _EIGEN_ALIGNED_PTR, and EIGEN_DEBUG_UNALIGNED_LOAD.

Referenced by ploaddup< Packet4f >(), ploadu< Packet2cf >(), and pset1< Packet2cf >().

+ Here is the caller graph for this function:

◆ ploadu< Packet4f >() [2/4]

280{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f32(from); }

References EIGEN_DEBUG_UNALIGNED_LOAD.

◆ ploadu< Packet4f >() [3/4]

327{
329 return _mm_loadu_ps(from);
330}

References EIGEN_DEBUG_UNALIGNED_LOAD.

◆ ploadu< Packet4f >() [4/4]

652{ return pload<Packet4f>(from); }

References pload< Packet4f >().

+ Here is the call graph for this function:

◆ ploadu< Packet4i >() [1/4]

457{
459 return (Packet4i) vec_vsx_ld((long)from & 15, (const int*) _EIGEN_ALIGNED_PTR(from));
460}

References _EIGEN_ALIGNED_PTR, and EIGEN_DEBUG_UNALIGNED_LOAD.

Referenced by ploaddup< Packet4i >().

+ Here is the caller graph for this function:

◆ ploadu< Packet4i >() [2/4]

339{
341 return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
342}

References EIGEN_DEBUG_UNALIGNED_LOAD.

◆ ploadu< Packet4i >() [3/4]

651{ return pload<Packet4i>(from); }

References pload< Packet4i >().

+ Here is the call graph for this function:

◆ ploadu< Packet4i >() [4/4]

◆ ploadu< Packet8d >()

451 {
452 EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_pd(from);
453}

References EIGEN_DEBUG_UNALIGNED_LOAD.

◆ ploadu< Packet8f >()

218{ EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_ps(from); }

References EIGEN_DEBUG_UNALIGNED_LOAD.

Referenced by ploadu< Packet4cf >().

+ Here is the caller graph for this function:

◆ ploadu< Packet8i >()

220{ EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from)); }

References EIGEN_DEBUG_UNALIGNED_LOAD.

◆ plog()

template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::plog ( const Packet a)
406{ using std::log; return log(a); }
EIGEN_DEVICE_FUNC const LogReturnType log() const
Definition ArrayCwiseUnaryOps.h:105

References log().

Referenced by Eigen::internal::scalar_log_op< Scalar >::packetOp().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ plog10()

template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::plog10 ( const Packet a)
414{ using std::log10; return log10(a); }
EIGEN_DEVICE_FUNC const Log10ReturnType log10() const
Definition ArrayCwiseUnaryOps.h:135

References log10().

+ Here is the call graph for this function:

◆ plog1p()

template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::plog1p ( const Packet a)
410{ return numext::log1p(a); }

◆ plog< Packet4f >() [1/2]

96{
97 Packet4f x = _x;
98
99 Packet4i emm0;
100
101 /* isvalid_mask is 0 if x < 0 or x is NaN. */
102 Packet4ui isvalid_mask = reinterpret_cast<Packet4ui>(vec_cmpge(x, p4f_ZERO));
103 Packet4ui iszero_mask = reinterpret_cast<Packet4ui>(vec_cmpeq(x, p4f_ZERO));
104
105 x = pmax(x, p4f_min_norm_pos); /* cut off denormalized stuff */
106 emm0 = vec_sr(reinterpret_cast<Packet4i>(x),
107 reinterpret_cast<Packet4ui>(p4i_23));
108
109 /* keep only the fractional part */
110 x = pand(x, p4f_inv_mant_mask);
111 x = por(x, p4f_half);
112
113 emm0 = psub(emm0, p4i_0x7f);
114 Packet4f e = padd(vec_ctf(emm0, 0), p4f_1);
115
116 /* part2:
117 if( x < SQRTHF ) {
118 e -= 1;
119 x = x + x - 1.0;
120 } else { x = x - 1.0; }
121 */
122 Packet4f mask = reinterpret_cast<Packet4f>(vec_cmplt(x, p4f_cephes_SQRTHF));
123 Packet4f tmp = pand(x, mask);
124 x = psub(x, p4f_1);
125 e = psub(e, pand(p4f_1, mask));
126 x = padd(x, tmp);
127
128 Packet4f x2 = pmul(x,x);
129 Packet4f x3 = pmul(x2,x);
130
131 Packet4f y, y1, y2;
132 y = pmadd(p4f_cephes_log_p0, x, p4f_cephes_log_p1);
133 y1 = pmadd(p4f_cephes_log_p3, x, p4f_cephes_log_p4);
134 y2 = pmadd(p4f_cephes_log_p6, x, p4f_cephes_log_p7);
135 y = pmadd(y , x, p4f_cephes_log_p2);
136 y1 = pmadd(y1, x, p4f_cephes_log_p5);
137 y2 = pmadd(y2, x, p4f_cephes_log_p8);
138 y = pmadd(y, x3, y1);
139 y = pmadd(y, x3, y2);
140 y = pmul(y, x3);
141
142 y1 = pmul(e, p4f_cephes_log_q1);
143 tmp = pmul(x2, p4f_half);
144 y = padd(y, y1);
145 x = psub(x, tmp);
146 y2 = pmul(e, p4f_cephes_log_q2);
147 x = padd(x, y);
148 x = padd(x, y2);
149 // negative arg will be NAN, 0 will be -INF
150 x = vec_sel(x, p4f_minus_inf, iszero_mask);
151 x = vec_sel(p4f_minus_nan, x, isvalid_mask);
152 return x;
153}
EIGEN_DEVICE_FUNC Packet por(const Packet &a, const Packet &b)
Definition GenericPacketMath.h:202

References padd(), pand(), pmadd(), pmax(), pmul(), por(), psub(), and y.

+ Here is the call graph for this function:

◆ plog< Packet4f >() [2/2]

24{
25 Packet4f x = _x;
29
30 _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(inv_mant_mask, ~0x7f800000);
31
32 /* the smallest non denormalized float number */
33 _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(min_norm_pos, 0x00800000);
34 _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(minus_inf, 0xff800000);//-1.f/0.f);
35
36 /* natural logarithm computed for 4 simultaneous float
37 return NaN for x <= 0
38 */
39 _EIGEN_DECLARE_CONST_Packet4f(cephes_SQRTHF, 0.707106781186547524f);
40 _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p0, 7.0376836292E-2f);
41 _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p1, - 1.1514610310E-1f);
42 _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p2, 1.1676998740E-1f);
43 _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p3, - 1.2420140846E-1f);
44 _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p4, + 1.4249322787E-1f);
45 _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p5, - 1.6668057665E-1f);
46 _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p6, + 2.0000714765E-1f);
47 _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p7, - 2.4999993993E-1f);
48 _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p8, + 3.3333331174E-1f);
49 _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q1, -2.12194440e-4f);
50 _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q2, 0.693359375f);
51
52
53 Packet4i emm0;
54
55 Packet4f invalid_mask = _mm_cmpnge_ps(x, _mm_setzero_ps()); // not greater equal is true if x is NaN
56 Packet4f iszero_mask = _mm_cmpeq_ps(x, _mm_setzero_ps());
57
58 x = pmax(x, p4f_min_norm_pos); /* cut off denormalized stuff */
59 emm0 = _mm_srli_epi32(_mm_castps_si128(x), 23);
60
61 /* keep only the fractional part */
62 x = _mm_and_ps(x, p4f_inv_mant_mask);
63 x = _mm_or_ps(x, p4f_half);
64
65 emm0 = _mm_sub_epi32(emm0, p4i_0x7f);
66 Packet4f e = padd(Packet4f(_mm_cvtepi32_ps(emm0)), p4f_1);
67
68 /* part2:
69 if( x < SQRTHF ) {
70 e -= 1;
71 x = x + x - 1.0;
72 } else { x = x - 1.0; }
73 */
74 Packet4f mask = _mm_cmplt_ps(x, p4f_cephes_SQRTHF);
75 Packet4f tmp = pand(x, mask);
76 x = psub(x, p4f_1);
77 e = psub(e, pand(p4f_1, mask));
78 x = padd(x, tmp);
79
80 Packet4f x2 = pmul(x,x);
81 Packet4f x3 = pmul(x2,x);
82
83 Packet4f y, y1, y2;
84 y = pmadd(p4f_cephes_log_p0, x, p4f_cephes_log_p1);
85 y1 = pmadd(p4f_cephes_log_p3, x, p4f_cephes_log_p4);
86 y2 = pmadd(p4f_cephes_log_p6, x, p4f_cephes_log_p7);
87 y = pmadd(y , x, p4f_cephes_log_p2);
88 y1 = pmadd(y1, x, p4f_cephes_log_p5);
89 y2 = pmadd(y2, x, p4f_cephes_log_p8);
90 y = pmadd(y, x3, y1);
91 y = pmadd(y, x3, y2);
92 y = pmul(y, x3);
93
94 y1 = pmul(e, p4f_cephes_log_q1);
95 tmp = pmul(x2, p4f_half);
96 y = padd(y, y1);
97 x = psub(x, tmp);
98 y2 = pmul(e, p4f_cephes_log_q2);
99 x = padd(x, y);
100 x = padd(x, y2);
101 // negative arg will be NAN, 0 will be -INF
102 return _mm_or_ps(_mm_andnot_ps(iszero_mask, _mm_or_ps(x, invalid_mask)),
103 _mm_and_ps(iszero_mask, p4f_minus_inf));
104}
#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME, X)
Definition PacketMath.h:62

References _EIGEN_DECLARE_CONST_Packet4f, _EIGEN_DECLARE_CONST_Packet4f_FROM_INT, _EIGEN_DECLARE_CONST_Packet4i, padd(), pand(), pmadd(), pmax(), pmul(), psub(), and y.

+ Here is the call graph for this function:

◆ plog< Packet8f >()

121 {
122 Packet8f x = _x;
125 _EIGEN_DECLARE_CONST_Packet8f(126f, 126.0f);
126
127 _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(inv_mant_mask, ~0x7f800000);
128
129 // The smallest non denormalized float number.
130 _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(min_norm_pos, 0x00800000);
131 _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(minus_inf, 0xff800000);
132
133 // Polynomial coefficients.
134 _EIGEN_DECLARE_CONST_Packet8f(cephes_SQRTHF, 0.707106781186547524f);
135 _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p0, 7.0376836292E-2f);
136 _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p1, -1.1514610310E-1f);
137 _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p2, 1.1676998740E-1f);
138 _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p3, -1.2420140846E-1f);
139 _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p4, +1.4249322787E-1f);
140 _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p5, -1.6668057665E-1f);
141 _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p6, +2.0000714765E-1f);
142 _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p7, -2.4999993993E-1f);
143 _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p8, +3.3333331174E-1f);
144 _EIGEN_DECLARE_CONST_Packet8f(cephes_log_q1, -2.12194440e-4f);
145 _EIGEN_DECLARE_CONST_Packet8f(cephes_log_q2, 0.693359375f);
146
147 Packet8f invalid_mask = _mm256_cmp_ps(x, _mm256_setzero_ps(), _CMP_NGE_UQ); // not greater equal is true if x is NaN
148 Packet8f iszero_mask = _mm256_cmp_ps(x, _mm256_setzero_ps(), _CMP_EQ_OQ);
149
150 // Truncate input values to the minimum positive normal.
151 x = pmax(x, p8f_min_norm_pos);
152
153 Packet8f emm0 = pshiftright(x,23);
154 Packet8f e = _mm256_sub_ps(emm0, p8f_126f);
155
156 // Set the exponents to -1, i.e. x are in the range [0.5,1).
157 x = _mm256_and_ps(x, p8f_inv_mant_mask);
158 x = _mm256_or_ps(x, p8f_half);
159
160 // part2: Shift the inputs from the range [0.5,1) to [sqrt(1/2),sqrt(2))
161 // and shift by -1. The values are then centered around 0, which improves
162 // the stability of the polynomial evaluation.
163 // if( x < SQRTHF ) {
164 // e -= 1;
165 // x = x + x - 1.0;
166 // } else { x = x - 1.0; }
167 Packet8f mask = _mm256_cmp_ps(x, p8f_cephes_SQRTHF, _CMP_LT_OQ);
168 Packet8f tmp = _mm256_and_ps(x, mask);
169 x = psub(x, p8f_1);
170 e = psub(e, _mm256_and_ps(p8f_1, mask));
171 x = padd(x, tmp);
172
173 Packet8f x2 = pmul(x, x);
174 Packet8f x3 = pmul(x2, x);
175
176 // Evaluate the polynomial approximant of degree 8 in three parts, probably
177 // to improve instruction-level parallelism.
178 Packet8f y, y1, y2;
179 y = pmadd(p8f_cephes_log_p0, x, p8f_cephes_log_p1);
180 y1 = pmadd(p8f_cephes_log_p3, x, p8f_cephes_log_p4);
181 y2 = pmadd(p8f_cephes_log_p6, x, p8f_cephes_log_p7);
182 y = pmadd(y, x, p8f_cephes_log_p2);
183 y1 = pmadd(y1, x, p8f_cephes_log_p5);
184 y2 = pmadd(y2, x, p8f_cephes_log_p8);
185 y = pmadd(y, x3, y1);
186 y = pmadd(y, x3, y2);
187 y = pmul(y, x3);
188
189 // Add the logarithm of the exponent back to the result of the interpolation.
190 y1 = pmul(e, p8f_cephes_log_q1);
191 tmp = pmul(x2, p8f_half);
192 y = padd(y, y1);
193 x = psub(x, tmp);
194 y2 = pmul(e, p8f_cephes_log_q2);
195 x = padd(x, y);
196 x = padd(x, y2);
197
198 // Filter out invalid inputs, i.e. negative arg will be NAN, 0 will be -INF.
199 return _mm256_or_ps(
200 _mm256_andnot_ps(iszero_mask, _mm256_or_ps(x, invalid_mask)),
201 _mm256_and_ps(iszero_mask, p8f_minus_inf));
202}
#define _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(NAME, X)
Definition PacketMath.h:45
Packet8f pshiftright(Packet8f v, int n)
Definition MathFunctions.h:32

References _EIGEN_DECLARE_CONST_Packet8f, _EIGEN_DECLARE_CONST_Packet8f_FROM_INT, padd(), pmadd(), pmax(), pmul(), pshiftright(), psub(), and y.

+ Here is the call graph for this function:

◆ plset()

template<typename Packet >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet Eigen::internal::plset ( const typename unpacket_traits< Packet >::type &  a)
282{ return a; }

◆ plset< Packet16f >()

139 {
140 return _mm512_add_ps(
141 _mm512_set1_ps(a),
142 _mm512_set_ps(15.0f, 14.0f, 13.0f, 12.0f, 11.0f, 10.0f, 9.0f, 8.0f, 7.0f, 6.0f, 5.0f,
143 4.0f, 3.0f, 2.0f, 1.0f, 0.0f));
144}

◆ plset< Packet2d >() [1/2]

195{ return _mm_add_pd(pset1<Packet2d>(a),_mm_set_pd(1,0)); }

References pset1< Packet2d >().

+ Here is the call graph for this function:

◆ plset< Packet2d >() [2/2]

564{ return padd<Packet2d>(pset1<Packet2d>(a), p2d_COUNTDOWN); }

References p2d_COUNTDOWN, padd< Packet2d >(), and pset1< Packet2d >().

+ Here is the call graph for this function:

◆ plset< Packet4d >()

128{ return _mm256_add_pd(_mm256_set1_pd(a), _mm256_set_pd(3.0,2.0,1.0,0.0)); }

◆ plset< Packet4f >() [1/4]

346{ return pset1<Packet4f>(a) + p4f_COUNTDOWN; }
static Packet4f p4f_COUNTDOWN
Definition PacketMath.h:80

References p4f_COUNTDOWN, and pset1< Packet4f >().

+ Here is the call graph for this function:

◆ plset< Packet4f >() [2/4]

149{
150 const float f[] = {0, 1, 2, 3};
151 Packet4f countdown = vld1q_f32(f);
152 return vaddq_f32(pset1<Packet4f>(a), countdown);
153}

References pset1< Packet4f >().

+ Here is the call graph for this function:

◆ plset< Packet4f >() [3/4]

194{ return _mm_add_ps(pset1<Packet4f>(a), _mm_set_ps(3,2,1,0)); }

References pset1< Packet4f >().

+ Here is the call graph for this function:

◆ plset< Packet4f >() [4/4]

563{ return padd<Packet4f>(pset1<Packet4f>(a), p4f_COUNTDOWN); }

References p4f_COUNTDOWN, padd< Packet4f >(), and pset1< Packet4f >().

+ Here is the call graph for this function:

◆ plset< Packet4i >() [1/4]

347{ return pset1<Packet4i>(a) + p4i_COUNTDOWN; }
static Packet4i p4i_COUNTDOWN
Definition PacketMath.h:81

References p4i_COUNTDOWN, and pset1< Packet4i >().

+ Here is the call graph for this function:

◆ plset< Packet4i >() [2/4]

196{ return _mm_add_epi32(pset1<Packet4i>(a),_mm_set_epi32(3,2,1,0)); }

References pset1< Packet4i >().

+ Here is the call graph for this function:

◆ plset< Packet4i >() [3/4]

562{ return padd<Packet4i>(pset1<Packet4i>(a), p4i_COUNTDOWN); }

References p4i_COUNTDOWN, padd< Packet4i >(), and pset1< Packet4i >().

+ Here is the call graph for this function:

◆ plset< Packet4i >() [4/4]

155{
156 const int32_t i[] = {0, 1, 2, 3};
157 Packet4i countdown = vld1q_s32(i);
158 return vaddq_s32(pset1<Packet4i>(a), countdown);
159}

References pset1< Packet4i >().

+ Here is the call graph for this function:

◆ plset< Packet8d >()

146 {
147 return _mm512_add_pd(_mm512_set1_pd(a),
148 _mm512_set_pd(7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0));
149}

◆ plset< Packet8f >()

127{ return _mm256_add_ps(_mm256_set1_ps(a), _mm256_set_ps(7.0,6.0,5.0,4.0,3.0,2.0,1.0,0.0)); }

◆ pmadd() [1/9]

template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pmadd ( const Packet a,
const Packet b,
const Packet c 
)
inline
455{ return padd(pmul(a, b),c); }

References padd(), and pmul().

+ Here is the call graph for this function:

◆ pmadd() [2/9]

template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::pmadd ( const Packet2d a,
const Packet2d b,
const Packet2d c 
)
560{ return vec_madd(a, b, c); }

◆ pmadd() [3/9]

◆ pmadd() [4/9]

template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pmadd ( const Packet4f a,
const Packet4f b,
const Packet4f c 
)
220 {
221#if EIGEN_COMP_CLANG && EIGEN_ARCH_ARM
222 // Clang/ARM will replace VMLA by VMUL+VADD at least for some values of -mcpu,
223 // at least -mcpu=cortex-a8 and -mcpu=cortex-a7. Since the former is the default on
224 // -march=armv7-a, that is a very common case.
225 // See e.g. this thread:
226 // http://lists.llvm.org/pipermail/llvm-dev/2013-December/068806.html
227 // Filed LLVM bug:
228 // https://llvm.org/bugs/show_bug.cgi?id=27219
229 Packet4f r = c;
230 asm volatile(
231 "vmla.f32 %q[r], %q[a], %q[b]"
232 : [r] "+w" (r)
233 : [a] "w" (a),
234 [b] "w" (b)
235 : );
236 return r;
237#else
238 return vmlaq_f32(c,a,b);
239#endif
240}

◆ pmadd() [5/9]

template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pmadd ( const Packet4f a,
const Packet4f b,
const Packet4f c 
)
554{
555 Packet4f res;
556 res.v4f[0] = vec_madd(a.v4f[0], b.v4f[0], c.v4f[0]);
557 res.v4f[1] = vec_madd(a.v4f[1], b.v4f[1], c.v4f[1]);
558 return res;
559}

References Eigen::internal::Packet4f::v4f.

◆ pmadd() [6/9]

template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pmadd ( const Packet4i a,
const Packet4i b,
const Packet4i c 
)
389{ return a*b + c; }

◆ pmadd() [7/9]

template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pmadd ( const Packet4i a,
const Packet4i b,
const Packet4i c 
)
244{ return vmlaq_s32(c,a,b); }

◆ pmadd() [8/9]

template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pmadd ( const Packet4i a,
const Packet4i b,
const Packet4i c 
)
247{ return padd(pmul(a,b), c); }

References padd(), and pmul().

+ Here is the call graph for this function:

◆ pmadd() [9/9]

template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pmadd ( const Packet4i a,
const Packet4i b,
const Packet4i c 
)
552{ return padd<Packet4i>(pmul<Packet4i>(a, b), c); }

References padd< Packet4i >(), and pmul< Packet4i >().

+ Here is the call graph for this function:

◆ pmax()

template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pmax ( const Packet a,
const Packet b 
)
inline
186 { return numext::maxi(a, b); }

References Eigen::numext::maxi().

Referenced by generic_fast_tanh_float(), Eigen::internal::scalar_max_op< LhsScalar, RhsScalar >::packetOp(), pexp< Packet2d >(), pexp< Packet4d >(), pexp< Packet4f >(), pexp< Packet8f >(), plog< Packet4f >(), plog< Packet8f >(), and pmax< Packet4f >().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ pmax< Packet16f >()

243 {
244 return _mm512_max_ps(a, b);
245}

◆ pmax< Packet2d >() [1/2]

267{ return _mm_max_pd(a,b); }

Referenced by predux_max< Packet2d >(), and predux_max< Packet4f >().

+ Here is the caller graph for this function:

◆ pmax< Packet2d >() [2/2]

577{ return vec_max(a, b); }

◆ pmax< Packet4d >()

191{ return _mm256_max_pd(a,b); }

◆ pmax< Packet4f >() [1/4]

404{
405 #ifdef __VSX__
406 Packet4f ret;
407 __asm__ ("xvcmpgtsp %x0,%x2,%x1\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b));
408 return ret;
409 #else
410 return vec_max(a, b);
411 #endif
412}

◆ pmax< Packet4f >() [2/4]

249{ return vmaxq_f32(a,b); }

◆ pmax< Packet4f >() [3/4]

266{ return _mm_max_ps(a,b); }

◆ pmax< Packet4f >() [4/4]

579{
580 Packet4f res;
581 res.v4f[0] = pmax(a.v4f[0], b.v4f[0]);
582 res.v4f[1] = pmax(a.v4f[1], b.v4f[1]);
583 return res;
584}

References pmax(), and Eigen::internal::Packet4f::v4f.

+ Here is the call graph for this function:

◆ pmax< Packet4i >() [1/4]

413{ return vec_max(a, b); }

Referenced by predux_max< Packet4i >().

+ Here is the caller graph for this function:

◆ pmax< Packet4i >() [2/4]

250{ return vmaxq_s32(a,b); }

◆ pmax< Packet4i >() [3/4]

269{
270#ifdef EIGEN_VECTORIZE_SSE4_1
271 return _mm_max_epi32(a,b);
272#else
273 // after some bench, this version *is* faster than a scalar implementation
274 Packet4i mask = _mm_cmpgt_epi32(a,b);
275 return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
276#endif
277}

◆ pmax< Packet4i >() [4/4]

576{ return vec_max(a, b); }

◆ pmax< Packet8d >()

248 {
249 return _mm512_max_pd(a, b);
250}

◆ pmax< Packet8f >()

190{ return _mm256_max_ps(a,b); }

◆ pmin()

template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pmin ( const Packet a,
const Packet b 
)
inline
181 { return numext::mini(a, b); }

References Eigen::numext::mini().

Referenced by generic_fast_tanh_float(), Eigen::internal::scalar_min_op< LhsScalar, RhsScalar >::packetOp(), pexp< Packet2d >(), pexp< Packet4d >(), pexp< Packet4f >(), pexp< Packet8f >(), and pmin< Packet4f >().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ pmin< Packet16f >()

232 {
233 return _mm512_min_ps(a, b);
234}

◆ pmin< Packet2d >() [1/2]

254{ return _mm_min_pd(a,b); }

Referenced by predux_min< Packet2d >(), and predux_min< Packet4f >().

+ Here is the caller graph for this function:

◆ pmin< Packet2d >() [2/2]

567{ return vec_min(a, b); }

◆ pmin< Packet4d >()

188{ return _mm256_min_pd(a,b); }

◆ pmin< Packet4f >() [1/4]

392{
393 #ifdef __VSX__
394 Packet4f ret;
395 __asm__ ("xvcmpgesp %x0,%x1,%x2\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b));
396 return ret;
397 #else
398 return vec_min(a, b);
399 #endif
400}

◆ pmin< Packet4f >() [2/4]

246{ return vminq_f32(a,b); }

◆ pmin< Packet4f >() [3/4]

253{ return _mm_min_ps(a,b); }

◆ pmin< Packet4f >() [4/4]

569{
570 Packet4f res;
571 res.v4f[0] = pmin(a.v4f[0], b.v4f[0]);
572 res.v4f[1] = pmin(a.v4f[1], b.v4f[1]);
573 return res;
574}

References pmin(), and Eigen::internal::Packet4f::v4f.

+ Here is the call graph for this function:

◆ pmin< Packet4i >() [1/4]

401{ return vec_min(a, b); }

Referenced by predux_min< Packet4i >().

+ Here is the caller graph for this function:

◆ pmin< Packet4i >() [2/4]

247{ return vminq_s32(a,b); }

◆ pmin< Packet4i >() [3/4]

256{
257#ifdef EIGEN_VECTORIZE_SSE4_1
258 return _mm_min_epi32(a,b);
259#else
260 // after some bench, this version *is* faster than a scalar implementation
261 Packet4i mask = _mm_cmplt_epi32(a,b);
262 return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
263#endif
264}

◆ pmin< Packet4i >() [4/4]

566{ return vec_min(a, b); }

◆ pmin< Packet8d >()

237 {
238 return _mm512_min_pd(a, b);
239}

◆ pmin< Packet8f >()

187{ return _mm256_min_ps(a,b); }

◆ pmul() [1/3]

template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pmul ( const Packet a,
const Packet b 
)
inline
171 { return a*b; }

Referenced by Eigen::internal::mul_assign_op< DstScalar, SrcScalar >::assignPacket(), generic_fast_tanh_float(), Eigen::internal::gebp_traits< std::complex< RealScalar >, std::complex< RealScalar >, _ConjLhs, _ConjRhs >::madd(), Eigen::internal::gebp_traits< std::complex< RealScalar >, RealScalar, _ConjLhs, false >::madd_impl(), Eigen::internal::gebp_traits< RealScalar, std::complex< RealScalar >, false, _ConjRhs >::madd_impl(), Eigen::internal::scalar_abs2_op< Scalar >::packetOp(), Eigen::internal::scalar_cube_op< Scalar >::packetOp(), Eigen::internal::scalar_product_op< LhsScalar, RhsScalar >::packetOp(), Eigen::internal::linspaced_op_impl< Scalar, Packet, false >::packetOp(), pcos< Packet4f >(), pdiv< Packet2cd >(), pdiv< Packet4cf >(), pexp< Packet2d >(), pexp< Packet4d >(), pexp< Packet4f >(), pexp< Packet8f >(), plog< Packet4f >(), plog< Packet8f >(), pmadd(), pmadd(), Eigen::internal::conj_helper< Packet1cd, Packet1cd, false, true >::pmul(), Eigen::internal::conj_helper< Packet1cd, Packet1cd, true, false >::pmul(), Eigen::internal::conj_helper< Packet1cd, Packet1cd, true, true >::pmul(), Eigen::internal::conj_helper< Packet2cd, Packet2cd, false, true >::pmul(), Eigen::internal::conj_helper< Packet2cd, Packet2cd, true, false >::pmul(), Eigen::internal::conj_helper< Packet2cd, Packet2cd, true, true >::pmul(), Eigen::internal::conj_helper< Packet2cf, Packet2cf, false, true >::pmul(), Eigen::internal::conj_helper< Packet2cf, Packet2cf, true, false >::pmul(), Eigen::internal::conj_helper< Packet2cf, Packet2cf, true, true >::pmul(), Eigen::internal::conj_helper< Packet4cf, Packet4cf, false, true >::pmul(), Eigen::internal::conj_helper< Packet4cf, Packet4cf, true, false >::pmul(), Eigen::internal::conj_helper< Packet4cf, Packet4cf, true, true >::pmul(), Eigen::internal::conj_helper< Scalar, Scalar, false, false >::pmul(), pmul< Packet2cf >(), predux_mul< Packet16f >(), predux_mul< Packet2cd >(), predux_mul< Packet2cf >(), predux_mul< Packet2d >(), predux_mul< 
Packet4cf >(), predux_mul< Packet4f >(), predux_mul< Packet8d >(), psin< Packet4f >(), psin< Packet8f >(), Eigen::internal::quat_product< Architecture::SSE, Derived, OtherDerived, double >::run(), Eigen::internal::etor_product_packet_impl< RowMajor, 1, Lhs, Rhs, Packet, LoadMode >::run(), and Eigen::internal::etor_product_packet_impl< ColMajor, 1, Lhs, Rhs, Packet, LoadMode >::run().

+ Here is the caller graph for this function:

◆ pmul() [2/3]

template<>
std::complex< double > Eigen::internal::pmul ( const std::complex< double > &  a,
const std::complex< double > &  b 
)
inline
530{ return std::complex<double>(real(a)*real(b) - imag(a)*imag(b), imag(a)*real(b) + real(a)*imag(b)); }

References imag(), and real().

+ Here is the call graph for this function:

◆ pmul() [3/3]

template<>
std::complex< float > Eigen::internal::pmul ( const std::complex< float > &  a,
const std::complex< float > &  b 
)
inline
527{ return std::complex<float>(real(a)*real(b) - imag(a)*imag(b), imag(a)*real(b) + real(a)*imag(b)); }

References imag(), and real().

+ Here is the call graph for this function:

◆ pmul< Packet16f >()

197 {
198 return _mm512_mul_ps(a, b);
199}

◆ pmul< Packet1cd >() [1/2]

295{
296 #ifdef EIGEN_VECTORIZE_SSE3
297 return Packet1cd(_mm_addsub_pd(_mm_mul_pd(_mm_movedup_pd(a.v), b.v),
298 _mm_mul_pd(vec2d_swizzle1(a.v, 1, 1),
299 vec2d_swizzle1(b.v, 1, 0))));
300 #else
301 const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0));
302 return Packet1cd(_mm_add_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 0, 0), b.v),
303 _mm_xor_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 1, 1),
304 vec2d_swizzle1(b.v, 1, 0)), mask)));
305 #endif
306}

References vec2d_swizzle1.

Referenced by predux_mul< Packet2cf >().

+ Here is the caller graph for this function:

◆ pmul< Packet1cd >() [2/2]

150{
151 Packet2d a_re, a_im, v1, v2;
152
153 // Permute and multiply the real parts of a and b
154 a_re = vec_perm(a.v, a.v, p16uc_PSET64_HI);
155 // Get the imaginary parts of a
156 a_im = vec_perm(a.v, a.v, p16uc_PSET64_LO);
157 // multiply a_re * b
158 v1 = vec_madd(a_re, b.v, p2d_ZERO);
159 // multiply a_im * b and get the conjugate result
160 v2 = vec_madd(a_im, b.v, p2d_ZERO);
161 v2 = (Packet2d) vec_sld((Packet4ui)v2, (Packet4ui)v2, 8);
162 v2 = (Packet2d) vec_xor((Packet2d)v2, (Packet2d) p2ul_CONJ_XOR1);
163
164 return Packet1cd(v1 + v2);
165}

References p16uc_PSET64_HI, p16uc_PSET64_LO, and p2ul_CONJ_XOR1.

◆ pmul< Packet2cd >()

266{
267 __m256d tmp1 = _mm256_shuffle_pd(a.v,a.v,0x0);
268 __m256d even = _mm256_mul_pd(tmp1, b.v);
269 __m256d tmp2 = _mm256_shuffle_pd(a.v,a.v,0xF);
270 __m256d tmp3 = _mm256_shuffle_pd(b.v,b.v,0x5);
271 __m256d odd = _mm256_mul_pd(tmp2, tmp3);
272 return Packet2cd(_mm256_addsub_pd(even, odd));
273}

◆ pmul< Packet2cf >() [1/4]

104{
105 Packet4f v1, v2;
106
107 // Permute and multiply the real parts of a and b
108 v1 = vec_perm(a.v, a.v, p16uc_PSET32_WODD);
109 // Get the imaginary parts of a
110 v2 = vec_perm(a.v, a.v, p16uc_PSET32_WEVEN);
111 // multiply a_re * b
112 v1 = vec_madd(v1, b.v, p4f_ZERO);
113 // multiply a_im * b and get the conjugate result
114 v2 = vec_madd(v2, b.v, p4f_ZERO);
115 v2 = reinterpret_cast<Packet4f>(pxor(v2, reinterpret_cast<Packet4f>(p4ui_CONJ_XOR)));
116 // permute back to a proper order
117 v2 = vec_perm(v2, v2, p16uc_COMPLEX32_REV);
118
119 return Packet2cf(padd<Packet4f>(v1, v2));
120}
EIGEN_DEVICE_FUNC Packet pxor(const Packet &a, const Packet &b)
Definition GenericPacketMath.h:206

References p16uc_COMPLEX32_REV, p16uc_PSET32_WEVEN, p16uc_PSET32_WODD, p4ui_CONJ_XOR, padd< Packet4f >(), and pxor().

Referenced by predux_mul< Packet2cf >().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ pmul< Packet2cf >() [2/4]

85{
86 Packet4f v1, v2;
87
88 // Get the real values of a | a1_re | a1_re | a2_re | a2_re |
89 v1 = vcombine_f32(vdup_lane_f32(vget_low_f32(a.v), 0), vdup_lane_f32(vget_high_f32(a.v), 0));
90 // Get the imag values of a | a1_im | a1_im | a2_im | a2_im |
91 v2 = vcombine_f32(vdup_lane_f32(vget_low_f32(a.v), 1), vdup_lane_f32(vget_high_f32(a.v), 1));
92 // Multiply the real a with b
93 v1 = vmulq_f32(v1, b.v);
94 // Multiply the imag a with b
95 v2 = vmulq_f32(v2, b.v);
96 // Conjugate v2
97 v2 = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(v2), p4ui_CONJ_XOR()));
98 // Swap real/imag elements in v2.
99 v2 = vrev64q_f32(v2);
100 // Add and return the result
101 return Packet2cf(vaddq_f32(v1, v2));
102}

References p4ui_CONJ_XOR.

◆ pmul< Packet2cf >() [3/4]

69{
70 #ifdef EIGEN_VECTORIZE_SSE3
71 return Packet2cf(_mm_addsub_ps(_mm_mul_ps(_mm_moveldup_ps(a.v), b.v),
72 _mm_mul_ps(_mm_movehdup_ps(a.v),
73 vec4f_swizzle1(b.v, 1, 0, 3, 2))));
74// return Packet2cf(_mm_addsub_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v),
75// _mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3),
76// vec4f_swizzle1(b.v, 1, 0, 3, 2))));
77 #else
78 const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x00000000,0x80000000,0x00000000));
79 return Packet2cf(_mm_add_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v),
80 _mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3),
81 vec4f_swizzle1(b.v, 1, 0, 3, 2)), mask)));
82 #endif
83}

References vec4f_swizzle1.

◆ pmul< Packet2cf >() [4/4]

167{
168 Packet2cf res;
169 res.v.v4f[0] = pmul(Packet1cd(reinterpret_cast<Packet2d>(a.v.v4f[0])), Packet1cd(reinterpret_cast<Packet2d>(b.v.v4f[0]))).v;
170 res.v.v4f[1] = pmul(Packet1cd(reinterpret_cast<Packet2d>(a.v.v4f[1])), Packet1cd(reinterpret_cast<Packet2d>(b.v.v4f[1]))).v;
171 return res;
172}

References pmul(), Eigen::internal::Packet2cf::v, and Eigen::internal::Packet4f::v4f.

+ Here is the call graph for this function:

◆ pmul< Packet2d >() [1/2]

226{ return _mm_mul_pd(a,b); }

◆ pmul< Packet2d >() [2/2]

526{ return (a * b); }

◆ pmul< Packet4cf >()

63{
64 __m256 tmp1 = _mm256_mul_ps(_mm256_moveldup_ps(a.v), b.v);
65 __m256 tmp2 = _mm256_mul_ps(_mm256_movehdup_ps(a.v), _mm256_permute_ps(b.v, _MM_SHUFFLE(2,3,0,1)));
66 __m256 result = _mm256_addsub_ps(tmp1, tmp2);
67 return Packet4cf(result);
68}

◆ pmul< Packet4d >()

150{ return _mm256_mul_pd(a,b); }

◆ pmul< Packet4f >() [1/4]

361{ return vec_madd(a,b, p4f_MZERO); }

References p4f_MZERO.

Referenced by pdiv< Packet2cf >().

+ Here is the caller graph for this function:

◆ pmul< Packet4f >() [2/4]

173{ return vmulq_f32(a,b); }

◆ pmul< Packet4f >() [3/4]

225{ return _mm_mul_ps(a,b); }

◆ pmul< Packet4f >() [4/4]

520{
521 Packet4f c;
522 c.v4f[0] = a.v4f[0] * b.v4f[0];
523 c.v4f[1] = a.v4f[1] * b.v4f[1];
524 return c;
525}

◆ pmul< Packet4i >() [1/4]

362{ return a * b; }

Referenced by pmadd().

+ Here is the caller graph for this function:

◆ pmul< Packet4i >() [2/4]

174{ return vmulq_s32(a,b); }

◆ pmul< Packet4i >() [3/4]

228{
229#ifdef EIGEN_VECTORIZE_SSE4_1
230 return _mm_mullo_epi32(a,b);
231#else
232 // this version is slightly faster than 4 scalar products
233 return vec4i_swizzle1(
234 vec4i_swizzle2(
235 _mm_mul_epu32(a,b),
236 _mm_mul_epu32(vec4i_swizzle1(a,1,0,3,2),
237 vec4i_swizzle1(b,1,0,3,2)),
238 0,2,0,2),
239 0,2,1,3);
240#endif
241}
#define vec4i_swizzle2(a, b, p, q, r, s)
Definition PacketMath.h:76

References vec4i_swizzle1, and vec4i_swizzle2.

◆ pmul< Packet4i >() [4/4]

518{ return (a * b); }

◆ pmul< Packet8d >()

202 {
203 return _mm512_mul_pd(a, b);
204}

◆ pmul< Packet8f >()

149{ return _mm256_mul_ps(a,b); }

◆ pnegate() [1/23]

template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pnegate ( const Packet a)
inline
161{ return -a; }

◆ pnegate() [2/23]

template<>
EIGEN_STRONG_INLINE Packet16f Eigen::internal::pnegate ( const Packet16f a)
174 {
175 return _mm512_sub_ps(_mm512_set1_ps(0.0), a);
176}

◆ pnegate() [3/23]

template<>
EIGEN_STRONG_INLINE Packet1cd Eigen::internal::pnegate ( const Packet1cd a)
287{ return Packet1cd(pnegate(Packet2d(a.v))); }
EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf &a)
Definition Complex.h:100

References pnegate().

+ Here is the call graph for this function:

◆ pnegate() [4/23]

template<>
EIGEN_STRONG_INLINE Packet1cd Eigen::internal::pnegate ( const Packet1cd a)
138{ return Packet1cd(pnegate(Packet2d(a.v))); }

References pnegate().

+ Here is the call graph for this function:

◆ pnegate() [5/23]

template<>
EIGEN_STRONG_INLINE Packet2cd Eigen::internal::pnegate ( const Packet2cd a)
258{ return Packet2cd(pnegate(a.v)); }

References pnegate().

+ Here is the call graph for this function:

◆ pnegate() [6/23]

template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::pnegate ( const Packet2cf a)
100{ return Packet2cf(pnegate(a.v)); }

References pnegate().

Referenced by Eigen::internal::scalar_opposite_op< Scalar >::packetOp(), pnegate(), pnegate(), pnegate(), and pnegate().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ pnegate() [7/23]

template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::pnegate ( const Packet2cf a)
77{ return Packet2cf(pnegate<Packet4f>(a.v)); }

◆ pnegate() [8/23]

template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::pnegate ( const Packet2cf a)
58{
59 const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x80000000,0x80000000,0x80000000));
60 return Packet2cf(_mm_xor_ps(a.v,mask));
61}

◆ pnegate() [9/23]

template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::pnegate ( const Packet2cf a)
139{ return Packet2cf(pnegate(Packet4f(a.v))); }

References pnegate().

+ Here is the call graph for this function:

◆ pnegate() [10/23]

template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::pnegate ( const Packet2d a)
212{
213 const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x80000000,0x0,0x80000000));
214 return _mm_xor_pd(a,mask);
215}

◆ pnegate() [11/23]

template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::pnegate ( const Packet2d a)
546{ return (-a); }

◆ pnegate() [12/23]

template<>
EIGEN_STRONG_INLINE Packet4cf Eigen::internal::pnegate ( const Packet4cf a)
53{
54 return Packet4cf(pnegate(a.v));
55}

References pnegate().

+ Here is the call graph for this function:

◆ pnegate() [13/23]

template<>
EIGEN_STRONG_INLINE Packet4d Eigen::internal::pnegate ( const Packet4d a)
141{
142 return _mm256_sub_pd(_mm256_set1_pd(0.0),a);
143}

◆ pnegate() [14/23]

template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pnegate ( const Packet4f a)
355{ return p4f_ZERO - a; }

◆ pnegate() [15/23]

template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pnegate ( const Packet4f a)
167{ return vnegq_f32(a); }

◆ pnegate() [16/23]

template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pnegate ( const Packet4f a)
207{
208 const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x80000000,0x80000000,0x80000000));
209 return _mm_xor_ps(a,mask);
210}

◆ pnegate() [17/23]

template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::pnegate ( const Packet4f a)
540{
541 Packet4f c;
542 c.v4f[0] = -a.v4f[0];
543 c.v4f[1] = -a.v4f[1];
544 return c;
545}

◆ pnegate() [18/23]

template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pnegate ( const Packet4i a)
356{ return p4i_ZERO - a; }

◆ pnegate() [19/23]

template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pnegate ( const Packet4i a)
168{ return vnegq_s32(a); }

◆ pnegate() [20/23]

template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pnegate ( const Packet4i a)
217{
218 return psub(Packet4i(_mm_setr_epi32(0,0,0,0)), a);
219}

References psub().

+ Here is the call graph for this function:

◆ pnegate() [21/23]

template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::pnegate ( const Packet4i a)
538{ return (-a); }

◆ pnegate() [22/23]

template<>
EIGEN_STRONG_INLINE Packet8d Eigen::internal::pnegate ( const Packet8d a)
178 {
179 return _mm512_sub_pd(_mm512_set1_pd(0.0), a);
180}

◆ pnegate() [23/23]

template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::pnegate ( const Packet8f a)
137{
138 return _mm256_sub_ps(_mm256_set1_ps(0.0),a);
139}

◆ por()

template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::por ( const Packet a,
const Packet b 
)
inline
202{ return a | b; }

Referenced by plog< Packet4f >().

+ Here is the caller graph for this function:

◆ por< Packet16f >()

298 {
299#ifdef EIGEN_VECTORIZE_AVX512DQ
300 return _mm512_or_ps(a, b);
301#else
302 Packet16f res = _mm512_undefined_ps();
303 Packet4f lane0_a = _mm512_extractf32x4_ps(a, 0);
304 Packet4f lane0_b = _mm512_extractf32x4_ps(b, 0);
305 res = _mm512_insertf32x4(res, _mm_or_ps(lane0_a, lane0_b), 0);
306
307 Packet4f lane1_a = _mm512_extractf32x4_ps(a, 1);
308 Packet4f lane1_b = _mm512_extractf32x4_ps(b, 1);
309 res = _mm512_insertf32x4(res, _mm_or_ps(lane1_a, lane1_b), 1);
310
311 Packet4f lane2_a = _mm512_extractf32x4_ps(a, 2);
312 Packet4f lane2_b = _mm512_extractf32x4_ps(b, 2);
313 res = _mm512_insertf32x4(res, _mm_or_ps(lane2_a, lane2_b), 2);
314
315 Packet4f lane3_a = _mm512_extractf32x4_ps(a, 3);
316 Packet4f lane3_b = _mm512_extractf32x4_ps(b, 3);
317 res = _mm512_insertf32x4(res, _mm_or_ps(lane3_a, lane3_b), 3);
318
319 return res;
320#endif
321}

◆ por< Packet1cd >() [1/2]

309{ return Packet1cd(_mm_or_pd(a.v,b.v)); }

◆ por< Packet1cd >() [2/2]

176{ return Packet1cd(vec_or(a.v,b.v)); }

◆ por< Packet2cd >()

276{ return Packet2cd(_mm256_or_pd(a.v,b.v)); }

◆ por< Packet2cf >() [1/4]

123{ return Packet2cf(por<Packet4f>(a.v, b.v)); }

References por< Packet4f >().

+ Here is the call graph for this function:

◆ por< Packet2cf >() [2/4]

109{
110 return Packet2cf(vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v))));
111}

◆ por< Packet2cf >() [3/4]

86{ return Packet2cf(_mm_or_ps(a.v,b.v)); }

◆ por< Packet2cf >() [4/4]

177{ return Packet2cf(por<Packet4f>(a.v,b.v)); }

References por< Packet4f >().

+ Here is the call graph for this function:

◆ por< Packet2d >() [1/2]

295{ return _mm_or_pd(a,b); }

◆ por< Packet2d >() [2/2]

597{ return vec_or(a, b); }

◆ por< Packet4cf >()

71{ return Packet4cf(_mm256_or_ps(a.v,b.v)); }

◆ por< Packet4d >()

206{ return _mm256_or_pd(a,b); }

◆ por< Packet4f >() [1/4]

418{ return vec_or(a, b); }

Referenced by por< Packet2cf >().

+ Here is the caller graph for this function:

◆ por< Packet4f >() [2/4]

260{
261 return vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
262}

◆ por< Packet4f >() [3/4]

294{ return _mm_or_ps(a,b); }

◆ por< Packet4f >() [4/4]

599{
600 Packet4f res;
601 res.v4f[0] = pand(a.v4f[0], b.v4f[0]);
602 res.v4f[1] = pand(a.v4f[1], b.v4f[1]);
603 return res;
604}

References pand(), and Eigen::internal::Packet4f::v4f.

+ Here is the call graph for this function:

◆ por< Packet4i >() [1/4]

419{ return vec_or(a, b); }

◆ por< Packet4i >() [2/4]

263{ return vorrq_s32(a,b); }

◆ por< Packet4i >() [3/4]

296{ return _mm_or_si128(a,b); }

◆ por< Packet4i >() [4/4]

596{ return vec_or(a, b); }

◆ por< Packet8d >()

325 {
326#ifdef EIGEN_VECTORIZE_AVX512DQ
327 return _mm512_or_pd(a, b);
328#else
329 Packet8d res = _mm512_undefined_pd();
330 Packet4d lane0_a = _mm512_extractf64x4_pd(a, 0);
331 Packet4d lane0_b = _mm512_extractf64x4_pd(b, 0);
332 res = _mm512_insertf64x4(res, _mm256_or_pd(lane0_a, lane0_b), 0);
333
334 Packet4d lane1_a = _mm512_extractf64x4_pd(a, 1);
335 Packet4d lane1_b = _mm512_extractf64x4_pd(b, 1);
336 res = _mm512_insertf64x4(res, _mm256_or_pd(lane1_a, lane1_b), 1);
337
338 return res;
339#endif
340}

◆ por< Packet8f >()

205{ return _mm256_or_ps(a,b); }

◆ positive_real_hypot()

template<typename RealScalar >
EIGEN_STRONG_INLINE RealScalar Eigen::internal::positive_real_hypot ( const RealScalar &  x,
const RealScalar &  y 
)
77{
78 EIGEN_USING_STD_MATH(sqrt);
79 RealScalar p, qp;
80 p = numext::maxi(x,y);
81 if(p==RealScalar(0)) return RealScalar(0);
82 qp = numext::mini(y,x) / p;
83 return p * sqrt(RealScalar(1) + qp*qp);
84}

References Eigen::numext::maxi(), Eigen::numext::mini(), sqrt(), and y.

Referenced by Eigen::internal::scalar_hypot_op< Scalar, Scalar >::operator()().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ predux()

◆ predux< Packet16f >()

878 {
879 //#ifdef EIGEN_VECTORIZE_AVX512DQ
880#if 0
881 Packet8f lane0 = _mm512_extractf32x8_ps(a, 0);
882 Packet8f lane1 = _mm512_extractf32x8_ps(a, 1);
883 Packet8f sum = padd(lane0, lane1);
884 Packet8f tmp0 = _mm256_hadd_ps(sum, _mm256_permute2f128_ps(a, a, 1));
885 tmp0 = _mm256_hadd_ps(tmp0, tmp0);
886 return pfirst(_mm256_hadd_ps(tmp0, tmp0));
887#else
888 Packet4f lane0 = _mm512_extractf32x4_ps(a, 0);
889 Packet4f lane1 = _mm512_extractf32x4_ps(a, 1);
890 Packet4f lane2 = _mm512_extractf32x4_ps(a, 2);
891 Packet4f lane3 = _mm512_extractf32x4_ps(a, 3);
892 Packet4f sum = padd(padd(lane0, lane1), padd(lane2, lane3));
893 sum = _mm_hadd_ps(sum, sum);
894 sum = _mm_hadd_ps(sum, _mm_permute_ps(sum, 1));
895 return pfirst(sum);
896#endif
897}

References padd(), and pfirst().

+ Here is the call graph for this function:

◆ predux< Packet1cd >() [1/2]

template<>
EIGEN_STRONG_INLINE std::complex< double > Eigen::internal::predux< Packet1cd > ( const Packet1cd a)
339{
340 return pfirst(a);
341}

References pfirst().

+ Here is the call graph for this function:

◆ predux< Packet1cd >() [2/2]

template<>
EIGEN_STRONG_INLINE std::complex< double > Eigen::internal::predux< Packet1cd > ( const Packet1cd a)
214{
215 return pfirst(a);
216}

References pfirst().

+ Here is the call graph for this function:

◆ predux< Packet2cd >()

template<>
EIGEN_STRONG_INLINE std::complex< double > Eigen::internal::predux< Packet2cd > ( const Packet2cd a)
325{
326 return predux(padd(Packet1cd(_mm256_extractf128_pd(a.v,0)),
327 Packet1cd(_mm256_extractf128_pd(a.v,1))));
328}
EIGEN_DEVICE_FUNC unpacket_traits< Packet >::type predux(const Packet &a)
Definition GenericPacketMath.h:323

References padd(), and predux().

+ Here is the call graph for this function:

◆ predux< Packet2cf >() [1/4]

template<>
EIGEN_STRONG_INLINE std::complex< float > Eigen::internal::predux< Packet2cf > ( const Packet2cf a)
145{
146 Packet4f b;
147 b = vec_sld(a.v, a.v, 8);
148 b = padd<Packet4f>(a.v, b);
149 return pfirst<Packet2cf>(Packet2cf(b));
150}

References padd< Packet4f >(), and pfirst< Packet2cf >().

+ Here is the call graph for this function:

◆ predux< Packet2cf >() [2/4]

template<>
EIGEN_STRONG_INLINE std::complex< float > Eigen::internal::predux< Packet2cf > ( const Packet2cf a)
172{
173 float32x2_t a1, a2;
174 std::complex<float> s;
175
176 a1 = vget_low_f32(a.v);
177 a2 = vget_high_f32(a.v);
178 a2 = vadd_f32(a1, a2);
179 vst1_f32((float *)&s, a2);
180
181 return s;
182}

◆ predux< Packet2cf >() [3/4]

template<>
EIGEN_STRONG_INLINE std::complex< float > Eigen::internal::predux< Packet2cf > ( const Packet2cf a)
151{
152 return pfirst(Packet2cf(_mm_add_ps(a.v, _mm_movehl_ps(a.v,a.v))));
153}

References pfirst().

+ Here is the call graph for this function:

◆ predux< Packet2cf >() [4/4]

template<>
EIGEN_STRONG_INLINE std::complex< float > Eigen::internal::predux< Packet2cf > ( const Packet2cf a)
218{
219 std::complex<float> res;
220 Packet1cd b = padd<Packet1cd>(a.cd[0], a.cd[1]);
221 vec_st2f(b.v, (float*)&res);
222 return res;
223}

References padd< Packet1cd >().

+ Here is the call graph for this function:

◆ predux< Packet2d >() [1/2]

554{
555 // Disable SSE3 _mm_hadd_pd that is extremely slow on all existing Intel's architectures
556 // (from Nehalem to Haswell)
557// #ifdef EIGEN_VECTORIZE_SSE3
558// return pfirst<Packet2d>(_mm_hadd_pd(a, a));
559// #else
560 return pfirst<Packet2d>(_mm_add_sd(a, _mm_unpackhi_pd(a,a)));
561// #endif
562}

References pfirst< Packet2d >().

Referenced by predux< Packet4f >().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ predux< Packet2d >() [2/2]

727{
728 Packet2d b, sum;
729 b = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(a), reinterpret_cast<Packet4i>(a), 8));
730 sum = padd<Packet2d>(a, b);
731 return pfirst(sum);
732}

References padd< Packet2d >(), and pfirst().

+ Here is the call graph for this function:

◆ predux< Packet4cf >()

template<>
EIGEN_STRONG_INLINE std::complex< float > Eigen::internal::predux< Packet4cf > ( const Packet4cf a)
138{
139 return predux(padd(Packet2cf(_mm256_extractf128_ps(a.v,0)),
140 Packet2cf(_mm256_extractf128_ps(a.v,1))));
141}

References padd(), and predux().

+ Here is the call graph for this function:

◆ predux< Packet4d >()

405{
406 return predux(Packet2d(_mm_add_pd(_mm256_castpd256_pd128(a),_mm256_extractf128_pd(a,1))));
407}

References predux().

+ Here is the call graph for this function:

◆ predux< Packet4f >() [1/4]

552{
553 Packet4f b, sum;
554 b = vec_sld(a, a, 8);
555 sum = a + b;
556 b = vec_sld(sum, sum, 4);
557 sum += b;
558 return pfirst(sum);
559}

References pfirst().

+ Here is the call graph for this function:

◆ predux< Packet4f >() [2/4]

368{
369 float32x2_t a_lo, a_hi, sum;
370
371 a_lo = vget_low_f32(a);
372 a_hi = vget_high_f32(a);
373 sum = vpadd_f32(a_lo, a_hi);
374 sum = vpadd_f32(sum, sum);
375 return vget_lane_f32(sum, 0);
376}

◆ predux< Packet4f >() [3/4]

541{
542 // Disable SSE3 _mm_hadd_pd that is extremely slow on all existing Intel's architectures
543 // (from Nehalem to Haswell)
544// #ifdef EIGEN_VECTORIZE_SSE3
545// Packet4f tmp = _mm_add_ps(a, vec4f_swizzle1(a,2,3,2,3));
546// return pfirst<Packet4f>(_mm_hadd_ps(tmp, tmp));
547// #else
548 Packet4f tmp = _mm_add_ps(a, _mm_movehl_ps(a,a));
549 return pfirst<Packet4f>(_mm_add_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
550// #endif
551}

References pfirst< Packet4f >().

+ Here is the call graph for this function:

◆ predux< Packet4f >() [4/4]

734{
735 Packet2d sum;
736 sum = padd<Packet2d>(a.v4f[0], a.v4f[1]);
737 double first = predux<Packet2d>(sum);
738 return static_cast<float>(first);
739}

References padd< Packet2d >(), and predux< Packet2d >().

+ Here is the call graph for this function:

◆ predux< Packet4i >() [1/4]

590{
591 Packet4i sum;
592 sum = vec_sums(a, p4i_ZERO);
593#ifdef _BIG_ENDIAN
594 sum = vec_sld(sum, p4i_ZERO, 12);
595#else
596 sum = vec_sld(p4i_ZERO, sum, 4);
597#endif
598 return pfirst(sum);
599}

References pfirst().

+ Here is the call graph for this function:

◆ predux< Packet4i >() [2/4]

399{
400 int32x2_t a_lo, a_hi, sum;
401
402 a_lo = vget_low_s32(a);
403 a_hi = vget_high_s32(a);
404 sum = vpadd_s32(a_lo, a_hi);
405 sum = vpadd_s32(sum, sum);
406 return vget_lane_s32(sum, 0);
407}

◆ predux< Packet4i >() [3/4]

576{
577 Packet4i tmp = _mm_add_epi32(a, _mm_unpackhi_epi64(a,a));
578 return pfirst(tmp) + pfirst<Packet4i>(_mm_shuffle_epi32(tmp, 1));
579}

References pfirst(), and pfirst< Packet4i >().

+ Here is the call graph for this function:

◆ predux< Packet4i >() [4/4]

717{
718 Packet4i b, sum;
719 b = vec_sld(a, a, 8);
720 sum = padd<Packet4i>(a, b);
721 b = vec_sld(sum, sum, 4);
722 sum = padd<Packet4i>(sum, b);
723 return pfirst(sum);
724}

References padd< Packet4i >(), and pfirst().

+ Here is the call graph for this function:

◆ predux< Packet8d >()

899 {
900 Packet4d lane0 = _mm512_extractf64x4_pd(a, 0);
901 Packet4d lane1 = _mm512_extractf64x4_pd(a, 1);
902 Packet4d sum = padd(lane0, lane1);
903 Packet4d tmp0 = _mm256_hadd_pd(sum, _mm256_permute2f128_pd(sum, sum, 1));
904 return pfirst(_mm256_hadd_pd(tmp0, tmp0));
905}

References padd(), and pfirst().

+ Here is the call graph for this function:

◆ predux< Packet8f >()

401{
402 return predux(Packet4f(_mm_add_ps(_mm256_castps256_ps128(a),_mm256_extractf128_ps(a,1))));
403}

References predux().

+ Here is the call graph for this function:

◆ predux_downto4() [1/2]

template<typename Packet >
const DoublePacket< Packet > & Eigen::internal::predux_downto4 ( const DoublePacket< Packet > &  a)
584{
585 return a;
586}

◆ predux_downto4() [2/2]

template<typename Packet >
EIGEN_DEVICE_FUNC conditional<(unpacket_traits< Packet >::size%8)==0, typename unpacket_traits< Packet >::half, Packet >::type Eigen::internal::predux_downto4 ( const Packet a)
inline
333{ return a; }

Referenced by Eigen::internal::gebp_kernel< LhsScalar, RhsScalar, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs >::operator()().

+ Here is the caller graph for this function:

◆ predux_downto4< Packet16f >()

908 {
909#ifdef EIGEN_VECTORIZE_AVX512DQ
910 Packet8f lane0 = _mm512_extractf32x8_ps(a, 0);
911 Packet8f lane1 = _mm512_extractf32x8_ps(a, 1);
912 return padd(lane0, lane1);
913#else
914 Packet4f lane0 = _mm512_extractf32x4_ps(a, 0);
915 Packet4f lane1 = _mm512_extractf32x4_ps(a, 1);
916 Packet4f lane2 = _mm512_extractf32x4_ps(a, 2);
917 Packet4f lane3 = _mm512_extractf32x4_ps(a, 3);
918 Packet4f sum0 = padd(lane0, lane2);
919 Packet4f sum1 = padd(lane1, lane3);
920 return _mm256_insertf128_ps(_mm256_castps128_ps256(sum0), sum1, 1);
921#endif
922}

References padd().

+ Here is the call graph for this function:

◆ predux_downto4< Packet8d >()

924 {
925 Packet4d lane0 = _mm512_extractf64x4_pd(a, 0);
926 Packet4d lane1 = _mm512_extractf64x4_pd(a, 1);
927 Packet4d res = padd(lane0, lane1);
928 return res;
929}

References padd().

+ Here is the call graph for this function:

◆ predux_downto4< Packet8f >()

410{
411 return _mm_add_ps(_mm256_castps256_ps128(a),_mm256_extractf128_ps(a,1));
412}

◆ predux_max()

template<typename Packet >
EIGEN_DEVICE_FUNC unpacket_traits< Packet >::type Eigen::internal::predux_max ( const Packet a)
inline
345{ return a; }

Referenced by Eigen::internal::scalar_max_op< LhsScalar, RhsScalar >::predux().

+ Here is the caller graph for this function:

◆ predux_max< Packet16f >()

980 {
981 Packet4f lane0 = _mm512_extractf32x4_ps(a, 0);
982 Packet4f lane1 = _mm512_extractf32x4_ps(a, 1);
983 Packet4f lane2 = _mm512_extractf32x4_ps(a, 2);
984 Packet4f lane3 = _mm512_extractf32x4_ps(a, 3);
985 Packet4f res = _mm_max_ps(_mm_max_ps(lane0, lane1), _mm_max_ps(lane2, lane3));
986 res = _mm_max_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));
987 return pfirst(_mm_max_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));
988}

References pfirst().

+ Here is the call graph for this function:

◆ predux_max< Packet2d >() [1/2]

650{
651 return pfirst<Packet2d>(_mm_max_sd(a, _mm_unpackhi_pd(a,a)));
652}

References pfirst< Packet2d >().

+ Here is the call graph for this function:

◆ predux_max< Packet2d >() [2/2]

848{
849 return pfirst(pmax<Packet2d>(a, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(a), reinterpret_cast<Packet4i>(a), 8))));
850}

References pfirst(), and pmax< Packet2d >().

+ Here is the call graph for this function:

◆ predux_max< Packet4d >()

448{
449 Packet4d tmp = _mm256_max_pd(a, _mm256_permute2f128_pd(a,a,1));
450 return pfirst(_mm256_max_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));
451}

References pfirst().

+ Here is the call graph for this function:

◆ predux_max< Packet4f >() [1/4]

664{
665 Packet4f b, res;
666 b = vec_max(a, vec_sld(a, a, 8));
667 res = vec_max(b, vec_sld(b, b, 4));
668 return pfirst(res);
669}

References pfirst().

+ Here is the call graph for this function:

◆ predux_max< Packet4f >() [2/4]

487{
488 float32x2_t a_lo, a_hi, max;
489
490 a_lo = vget_low_f32(a);
491 a_hi = vget_high_f32(a);
492 max = vpmax_f32(a_lo, a_hi);
493 max = vpmax_f32(max, max);
494
495 return vget_lane_f32(max, 0);
496}

◆ predux_max< Packet4f >() [3/4]

645{
646 Packet4f tmp = _mm_max_ps(a, _mm_movehl_ps(a,a));
647 return pfirst<Packet4f>(_mm_max_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
648}

References pfirst< Packet4f >().

+ Here is the call graph for this function:

◆ predux_max< Packet4f >() [4/4]

853{
854 Packet2d b, res;
855 b = pmax<Packet2d>(a.v4f[0], a.v4f[1]);
856 res = pmax<Packet2d>(b, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(b), reinterpret_cast<Packet4i>(b), 8)));
857 return static_cast<float>(pfirst(res));
858}

References pfirst(), and pmax< Packet2d >().

+ Here is the call graph for this function:

◆ predux_max< Packet4i >() [1/4]

672{
673 Packet4i b, res;
674 b = vec_max(a, vec_sld(a, a, 8));
675 res = vec_max(b, vec_sld(b, b, 4));
676 return pfirst(res);
677}

References pfirst().

+ Here is the call graph for this function:

◆ predux_max< Packet4i >() [2/4]

499{
500 int32x2_t a_lo, a_hi, max;
501
502 a_lo = vget_low_s32(a);
503 a_hi = vget_high_s32(a);
504 max = vpmax_s32(a_lo, a_hi);
505 max = vpmax_s32(max, max);
506
507 return vget_lane_s32(max, 0);
508}

◆ predux_max< Packet4i >() [3/4]

654{
655#ifdef EIGEN_VECTORIZE_SSE4_1
656 Packet4i tmp = _mm_max_epi32(a, _mm_shuffle_epi32(a, _MM_SHUFFLE(0,0,3,2)));
657 return pfirst<Packet4i>(_mm_max_epi32(tmp,_mm_shuffle_epi32(tmp, 1)));
658#else
659 // after some experiments, it is seems this is the fastest way to implement it
660 // for GCC (eg., it does not like using std::min after the pstore !!)
661 EIGEN_ALIGN16 int aux[4];
662 pstore(aux, a);
663 int aux0 = aux[0]>aux[1] ? aux[0] : aux[1];
664 int aux2 = aux[2]>aux[3] ? aux[2] : aux[3];
665 return aux0>aux2 ? aux0 : aux2;
666#endif // EIGEN_VECTORIZE_SSE4_1
667}

References EIGEN_ALIGN16, pfirst< Packet4i >(), and pstore().

+ Here is the call graph for this function:

◆ predux_max< Packet4i >() [4/4]

839{
840 Packet4i b, res;
841 b = pmax<Packet4i>(a, vec_sld(a, a, 8));
842 res = pmax<Packet4i>(b, vec_sld(b, b, 4));
843 return pfirst(res);
844}

References pfirst(), and pmax< Packet4i >().

+ Here is the call graph for this function:

◆ predux_max< Packet8d >()

990 {
991 Packet4d lane0 = _mm512_extractf64x4_pd(a, 0);
992 Packet4d lane1 = _mm512_extractf64x4_pd(a, 1);
993 Packet4d res = _mm256_max_pd(lane0, lane1);
994 res = _mm256_max_pd(res, _mm256_permute2f128_pd(res, res, 1));
995 return pfirst(_mm256_max_pd(res, _mm256_shuffle_pd(res, res, 1)));
996}

References pfirst().

+ Here is the call graph for this function:

◆ predux_max< Packet8f >()

441{
442 Packet8f tmp = _mm256_max_ps(a, _mm256_permute2f128_ps(a,a,1));
443 tmp = _mm256_max_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
444 return pfirst(_mm256_max_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
445}

References pfirst().

+ Here is the call graph for this function:

◆ predux_min()

template<typename Packet >
EIGEN_DEVICE_FUNC unpacket_traits< Packet >::type Eigen::internal::predux_min ( const Packet a)
inline
341{ return a; }

Referenced by Eigen::internal::scalar_min_op< LhsScalar, RhsScalar >::predux().

+ Here is the caller graph for this function:

◆ predux_min< Packet16f >()

961 {
962 Packet4f lane0 = _mm512_extractf32x4_ps(a, 0);
963 Packet4f lane1 = _mm512_extractf32x4_ps(a, 1);
964 Packet4f lane2 = _mm512_extractf32x4_ps(a, 2);
965 Packet4f lane3 = _mm512_extractf32x4_ps(a, 3);
966 Packet4f res = _mm_min_ps(_mm_min_ps(lane0, lane1), _mm_min_ps(lane2, lane3));
967 res = _mm_min_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));
968 return pfirst(_mm_min_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));
969}

References pfirst().

+ Here is the call graph for this function:

◆ predux_min< Packet2d >() [1/2]

624{
625 return pfirst<Packet2d>(_mm_min_sd(a, _mm_unpackhi_pd(a,a)));
626}

References pfirst< Packet2d >().

+ Here is the call graph for this function:

◆ predux_min< Packet2d >() [2/2]

825{
826 return pfirst(pmin<Packet2d>(a, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(a), reinterpret_cast<Packet4i>(a), 8))));
827}

References pfirst(), and pmin< Packet2d >().

+ Here is the call graph for this function:

◆ predux_min< Packet4d >()

435{
436 Packet4d tmp = _mm256_min_pd(a, _mm256_permute2f128_pd(a,a,1));
437 return pfirst(_mm256_min_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));
438}

References pfirst().

+ Here is the call graph for this function:

◆ predux_min< Packet4f >() [1/4]

647{
648 Packet4f b, res;
649 b = vec_min(a, vec_sld(a, a, 8));
650 res = vec_min(b, vec_sld(b, b, 4));
651 return pfirst(res);
652}

References pfirst().

+ Here is the call graph for this function:

◆ predux_min< Packet4f >() [2/4]

462{
463 float32x2_t a_lo, a_hi, min;
464
465 a_lo = vget_low_f32(a);
466 a_hi = vget_high_f32(a);
467 min = vpmin_f32(a_lo, a_hi);
468 min = vpmin_f32(min, min);
469
470 return vget_lane_f32(min, 0);
471}

◆ predux_min< Packet4f >() [3/4]

619{
620 Packet4f tmp = _mm_min_ps(a, _mm_movehl_ps(a,a));
621 return pfirst<Packet4f>(_mm_min_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
622}

References pfirst< Packet4f >().

+ Here is the call graph for this function:

◆ predux_min< Packet4f >() [4/4]

830{
831 Packet2d b, res;
832 b = pmin<Packet2d>(a.v4f[0], a.v4f[1]);
833 res = pmin<Packet2d>(b, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(b), reinterpret_cast<Packet4i>(b), 8)));
834 return static_cast<float>(pfirst(res));
835}

References pfirst(), and pmin< Packet2d >().

+ Here is the call graph for this function:

◆ predux_min< Packet4i >() [1/4]

655{
656 Packet4i b, res;
657 b = vec_min(a, vec_sld(a, a, 8));
658 res = vec_min(b, vec_sld(b, b, 4));
659 return pfirst(res);
660}

References pfirst().

+ Here is the call graph for this function:

◆ predux_min< Packet4i >() [2/4]

474{
475 int32x2_t a_lo, a_hi, min;
476
477 a_lo = vget_low_s32(a);
478 a_hi = vget_high_s32(a);
479 min = vpmin_s32(a_lo, a_hi);
480 min = vpmin_s32(min, min);
481
482 return vget_lane_s32(min, 0);
483}

◆ predux_min< Packet4i >() [3/4]

628{
629#ifdef EIGEN_VECTORIZE_SSE4_1
630 Packet4i tmp = _mm_min_epi32(a, _mm_shuffle_epi32(a, _MM_SHUFFLE(0,0,3,2)));
631 return pfirst<Packet4i>(_mm_min_epi32(tmp,_mm_shuffle_epi32(tmp, 1)));
632#else
 633 // after some experiments, it seems this is the fastest way to implement it
 634 // for GCC (eg., it does not like using std::min after the pstore !!)
635 EIGEN_ALIGN16 int aux[4];
636 pstore(aux, a);
637 int aux0 = aux[0]<aux[1] ? aux[0] : aux[1];
638 int aux2 = aux[2]<aux[3] ? aux[2] : aux[3];
639 return aux0<aux2 ? aux0 : aux2;
640#endif // EIGEN_VECTORIZE_SSE4_1
641}

References EIGEN_ALIGN16, pfirst< Packet4i >(), and pstore().

+ Here is the call graph for this function:

◆ predux_min< Packet4i >() [4/4]

817{
818 Packet4i b, res;
819 b = pmin<Packet4i>(a, vec_sld(a, a, 8));
820 res = pmin<Packet4i>(b, vec_sld(b, b, 4));
821 return pfirst(res);
822}

References pfirst(), and pmin< Packet4i >().

+ Here is the call graph for this function:

◆ predux_min< Packet8d >()

971 {
972 Packet4d lane0 = _mm512_extractf64x4_pd(a, 0);
973 Packet4d lane1 = _mm512_extractf64x4_pd(a, 1);
974 Packet4d res = _mm256_min_pd(lane0, lane1);
975 res = _mm256_min_pd(res, _mm256_permute2f128_pd(res, res, 1));
976 return pfirst(_mm256_min_pd(res, _mm256_shuffle_pd(res, res, 1)));
977}

References pfirst().

+ Here is the call graph for this function:

◆ predux_min< Packet8f >()

429{
430 Packet8f tmp = _mm256_min_ps(a, _mm256_permute2f128_ps(a,a,1));
431 tmp = _mm256_min_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
432 return pfirst(_mm256_min_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
433}

References pfirst().

+ Here is the call graph for this function:

◆ predux_mul()

template<typename Packet >
EIGEN_DEVICE_FUNC unpacket_traits< Packet >::type Eigen::internal::predux_mul ( const Packet a)
inline
337{ return a; }

Referenced by Eigen::internal::scalar_product_op< LhsScalar, RhsScalar >::predux(), predux_mul< Packet4cf >(), and predux_mul< Packet4f >().

+ Here is the caller graph for this function:

◆ predux_mul< Packet16f >()

932 {
933//#ifdef EIGEN_VECTORIZE_AVX512DQ
934#if 0
935 Packet8f lane0 = _mm512_extractf32x8_ps(a, 0);
936 Packet8f lane1 = _mm512_extractf32x8_ps(a, 1);
937 Packet8f res = pmul(lane0, lane1);
938 res = pmul(res, _mm256_permute2f128_ps(res, res, 1));
939 res = pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));
940 return pfirst(pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));
941#else
942 Packet4f lane0 = _mm512_extractf32x4_ps(a, 0);
943 Packet4f lane1 = _mm512_extractf32x4_ps(a, 1);
944 Packet4f lane2 = _mm512_extractf32x4_ps(a, 2);
945 Packet4f lane3 = _mm512_extractf32x4_ps(a, 3);
946 Packet4f res = pmul(pmul(lane0, lane1), pmul(lane2, lane3));
947 res = pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));
948 return pfirst(pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));
949#endif
950}

References pfirst(), and pmul().

+ Here is the call graph for this function:

◆ predux_mul< Packet1cd >() [1/2]

template<>
EIGEN_STRONG_INLINE std::complex< double > Eigen::internal::predux_mul< Packet1cd > ( const Packet1cd a)
349{
350 return pfirst(a);
351}

References pfirst().

+ Here is the call graph for this function:

◆ predux_mul< Packet1cd >() [2/2]

template<>
EIGEN_STRONG_INLINE std::complex< double > Eigen::internal::predux_mul< Packet1cd > ( const Packet1cd a)
240{
241 return pfirst(a);
242}

References pfirst().

+ Here is the call graph for this function:

◆ predux_mul< Packet2cd >()

template<>
EIGEN_STRONG_INLINE std::complex< double > Eigen::internal::predux_mul< Packet2cd > ( const Packet2cd a)
339{
340 return predux(pmul(Packet1cd(_mm256_extractf128_pd(a.v,0)),
341 Packet1cd(_mm256_extractf128_pd(a.v,1))));
342}

References pmul(), and predux().

+ Here is the call graph for this function:

◆ predux_mul< Packet2cf >() [1/4]

template<>
EIGEN_STRONG_INLINE std::complex< float > Eigen::internal::predux_mul< Packet2cf > ( const Packet2cf a)
169{
170 Packet4f b;
171 Packet2cf prod;
172 b = vec_sld(a.v, a.v, 8);
173 prod = pmul<Packet2cf>(a, Packet2cf(b));
174
175 return pfirst<Packet2cf>(prod);
176}

References pfirst< Packet2cf >(), and pmul< Packet2cf >().

+ Here is the call graph for this function:

◆ predux_mul< Packet2cf >() [2/4]

template<>
EIGEN_STRONG_INLINE std::complex< float > Eigen::internal::predux_mul< Packet2cf > ( const Packet2cf a)
197{
198 float32x2_t a1, a2, v1, v2, prod;
199 std::complex<float> s;
200
201 a1 = vget_low_f32(a.v);
202 a2 = vget_high_f32(a.v);
203 // Get the real values of a | a1_re | a1_re | a2_re | a2_re |
204 v1 = vdup_lane_f32(a1, 0);
205 // Get the real values of a | a1_im | a1_im | a2_im | a2_im |
206 v2 = vdup_lane_f32(a1, 1);
207 // Multiply the real a with b
208 v1 = vmul_f32(v1, a2);
209 // Multiply the imag a with b
210 v2 = vmul_f32(v2, a2);
211 // Conjugate v2
212 v2 = vreinterpret_f32_u32(veor_u32(vreinterpret_u32_f32(v2), p2ui_CONJ_XOR()));
213 // Swap real/imag elements in v2.
214 v2 = vrev64_f32(v2);
215 // Add v1, v2
216 prod = vadd_f32(v1, v2);
217
218 vst1_f32((float *)&s, prod);
219
220 return s;
221}
uint32x2_t p2ui_CONJ_XOR()
Definition Complex.h:29

References p2ui_CONJ_XOR().

+ Here is the call graph for this function:

◆ predux_mul< Packet2cf >() [3/4]

template<>
EIGEN_STRONG_INLINE std::complex< float > Eigen::internal::predux_mul< Packet2cf > ( const Packet2cf a)
161{
162 return pfirst(pmul(a, Packet2cf(_mm_movehl_ps(a.v,a.v))));
163}

References pfirst(), and pmul().

+ Here is the call graph for this function:

◆ predux_mul< Packet2cf >() [4/4]

template<>
EIGEN_STRONG_INLINE std::complex< float > Eigen::internal::predux_mul< Packet2cf > ( const Packet2cf a)
244{
245 std::complex<float> res;
246 Packet1cd b = pmul<Packet1cd>(a.cd[0], a.cd[1]);
247 vec_st2f(b.v, (float*)&res);
248 return res;
249}

References pmul< Packet1cd >().

+ Here is the call graph for this function:

◆ predux_mul< Packet2d >() [1/2]

604{
605 return pfirst<Packet2d>(_mm_mul_sd(a, _mm_unpackhi_pd(a,a)));
606}

References pfirst< Packet2d >().

+ Here is the call graph for this function:

◆ predux_mul< Packet2d >() [2/2]

805{
806 return pfirst(pmul(a, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(a), reinterpret_cast<Packet4i>(a), 8))));
807}

References pfirst(), and pmul().

+ Here is the call graph for this function:

◆ predux_mul< Packet4cf >()

template<>
EIGEN_STRONG_INLINE std::complex< float > Eigen::internal::predux_mul< Packet4cf > ( const Packet4cf a)
159{
160 return predux_mul(pmul(Packet2cf(_mm256_extractf128_ps(a.v, 0)),
161 Packet2cf(_mm256_extractf128_ps(a.v, 1))));
162}
EIGEN_DEVICE_FUNC unpacket_traits< Packet >::type predux_mul(const Packet &a)
Definition GenericPacketMath.h:336

References pmul(), and predux_mul().

+ Here is the call graph for this function:

◆ predux_mul< Packet4d >()

422{
423 Packet4d tmp;
424 tmp = _mm256_mul_pd(a, _mm256_permute2f128_pd(a,a,1));
425 return pfirst(_mm256_mul_pd(tmp, _mm256_shuffle_pd(tmp,tmp,1)));
426}

References pfirst().

+ Here is the call graph for this function:

◆ predux_mul< Packet4f >() [1/4]

632{
633 Packet4f prod;
634 prod = pmul(a, vec_sld(a, a, 8));
635 return pfirst(pmul(prod, vec_sld(prod, prod, 4)));
636}

References pfirst(), and pmul().

+ Here is the call graph for this function:

◆ predux_mul< Packet4f >() [2/4]

432{
433 float32x2_t a_lo, a_hi, prod;
434
435 // Get a_lo = |a1|a2| and a_hi = |a3|a4|
436 a_lo = vget_low_f32(a);
437 a_hi = vget_high_f32(a);
438 // Get the product of a_lo * a_hi -> |a1*a3|a2*a4|
439 prod = vmul_f32(a_lo, a_hi);
440 // Multiply prod with its swapped value |a2*a4|a1*a3|
441 prod = vmul_f32(prod, vrev64_f32(prod));
442
443 return vget_lane_f32(prod, 0);
444}

◆ predux_mul< Packet4f >() [3/4]

599{
600 Packet4f tmp = _mm_mul_ps(a, _mm_movehl_ps(a,a));
601 return pfirst<Packet4f>(_mm_mul_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
602}

References pfirst< Packet4f >().

+ Here is the call graph for this function:

◆ predux_mul< Packet4f >() [4/4]

810{
811 // Return predux_mul<Packet2d> of the subvectors product
812 return static_cast<float>(pfirst(predux_mul(pmul(a.v4f[0], a.v4f[1]))));
813}

References pfirst(), pmul(), and predux_mul().

+ Here is the call graph for this function:

◆ predux_mul< Packet4i >() [1/4]

639{
640 EIGEN_ALIGN16 int aux[4];
641 pstore(aux, a);
642 return aux[0] * aux[1] * aux[2] * aux[3];
643}

References EIGEN_ALIGN16, and pstore().

+ Here is the call graph for this function:

◆ predux_mul< Packet4i >() [2/4]

446{
447 int32x2_t a_lo, a_hi, prod;
448
449 // Get a_lo = |a1|a2| and a_hi = |a3|a4|
450 a_lo = vget_low_s32(a);
451 a_hi = vget_high_s32(a);
452 // Get the product of a_lo * a_hi -> |a1*a3|a2*a4|
453 prod = vmul_s32(a_lo, a_hi);
454 // Multiply prod with its swapped value |a2*a4|a1*a3|
455 prod = vmul_s32(prod, vrev64_s32(prod));
456
457 return vget_lane_s32(prod, 0);
458}

◆ predux_mul< Packet4i >() [3/4]

608{
609 // after some experiments, it is seems this is the fastest way to implement it
610 // for GCC (eg., reusing pmul is very slow !)
611 // TODO try to call _mm_mul_epu32 directly
612 EIGEN_ALIGN16 int aux[4];
613 pstore(aux, a);
 614 return (aux[0] * aux[1]) * (aux[2] * aux[3]);
615}

References EIGEN_ALIGN16, and pstore().

+ Here is the call graph for this function:

◆ predux_mul< Packet4i >() [4/4]

798{
799 EIGEN_ALIGN16 int aux[4];
800 pstore(aux, a);
801 return aux[0] * aux[1] * aux[2] * aux[3];
802}

References EIGEN_ALIGN16, and pstore().

+ Here is the call graph for this function:

◆ predux_mul< Packet8d >()

952 {
953 Packet4d lane0 = _mm512_extractf64x4_pd(a, 0);
954 Packet4d lane1 = _mm512_extractf64x4_pd(a, 1);
955 Packet4d res = pmul(lane0, lane1);
956 res = pmul(res, _mm256_permute2f128_pd(res, res, 1));
957 return pfirst(pmul(res, _mm256_shuffle_pd(res, res, 1)));
958}

References pfirst(), and pmul().

+ Here is the call graph for this function:

◆ predux_mul< Packet8f >()

415{
416 Packet8f tmp;
417 tmp = _mm256_mul_ps(a, _mm256_permute2f128_ps(a,a,1));
418 tmp = _mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
419 return pfirst(_mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
420}

References pfirst().

+ Here is the call graph for this function:

◆ preduxp()

template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::preduxp ( const Packet vecs)
inline
320{ return vecs[0]; }

◆ preduxp< Packet16f >()

688{
689 EIGEN_EXTRACT_8f_FROM_16f(vecs[0], vecs0);
690 EIGEN_EXTRACT_8f_FROM_16f(vecs[1], vecs1);
691 EIGEN_EXTRACT_8f_FROM_16f(vecs[2], vecs2);
692 EIGEN_EXTRACT_8f_FROM_16f(vecs[3], vecs3);
693 EIGEN_EXTRACT_8f_FROM_16f(vecs[4], vecs4);
694 EIGEN_EXTRACT_8f_FROM_16f(vecs[5], vecs5);
695 EIGEN_EXTRACT_8f_FROM_16f(vecs[6], vecs6);
696 EIGEN_EXTRACT_8f_FROM_16f(vecs[7], vecs7);
697 EIGEN_EXTRACT_8f_FROM_16f(vecs[8], vecs8);
698 EIGEN_EXTRACT_8f_FROM_16f(vecs[9], vecs9);
699 EIGEN_EXTRACT_8f_FROM_16f(vecs[10], vecs10);
700 EIGEN_EXTRACT_8f_FROM_16f(vecs[11], vecs11);
701 EIGEN_EXTRACT_8f_FROM_16f(vecs[12], vecs12);
702 EIGEN_EXTRACT_8f_FROM_16f(vecs[13], vecs13);
703 EIGEN_EXTRACT_8f_FROM_16f(vecs[14], vecs14);
704 EIGEN_EXTRACT_8f_FROM_16f(vecs[15], vecs15);
705
706 __m256 hsum1 = _mm256_hadd_ps(vecs0_0, vecs1_0);
707 __m256 hsum2 = _mm256_hadd_ps(vecs2_0, vecs3_0);
708 __m256 hsum3 = _mm256_hadd_ps(vecs4_0, vecs5_0);
709 __m256 hsum4 = _mm256_hadd_ps(vecs6_0, vecs7_0);
710
711 __m256 hsum5 = _mm256_hadd_ps(hsum1, hsum1);
712 __m256 hsum6 = _mm256_hadd_ps(hsum2, hsum2);
713 __m256 hsum7 = _mm256_hadd_ps(hsum3, hsum3);
714 __m256 hsum8 = _mm256_hadd_ps(hsum4, hsum4);
715
716 __m256 perm1 = _mm256_permute2f128_ps(hsum5, hsum5, 0x23);
717 __m256 perm2 = _mm256_permute2f128_ps(hsum6, hsum6, 0x23);
718 __m256 perm3 = _mm256_permute2f128_ps(hsum7, hsum7, 0x23);
719 __m256 perm4 = _mm256_permute2f128_ps(hsum8, hsum8, 0x23);
720
721 __m256 sum1 = _mm256_add_ps(perm1, hsum5);
722 __m256 sum2 = _mm256_add_ps(perm2, hsum6);
723 __m256 sum3 = _mm256_add_ps(perm3, hsum7);
724 __m256 sum4 = _mm256_add_ps(perm4, hsum8);
725
726 __m256 blend1 = _mm256_blend_ps(sum1, sum2, 0xcc);
727 __m256 blend2 = _mm256_blend_ps(sum3, sum4, 0xcc);
728
729 __m256 final = _mm256_blend_ps(blend1, blend2, 0xf0);
730
731 hsum1 = _mm256_hadd_ps(vecs0_1, vecs1_1);
732 hsum2 = _mm256_hadd_ps(vecs2_1, vecs3_1);
733 hsum3 = _mm256_hadd_ps(vecs4_1, vecs5_1);
734 hsum4 = _mm256_hadd_ps(vecs6_1, vecs7_1);
735
736 hsum5 = _mm256_hadd_ps(hsum1, hsum1);
737 hsum6 = _mm256_hadd_ps(hsum2, hsum2);
738 hsum7 = _mm256_hadd_ps(hsum3, hsum3);
739 hsum8 = _mm256_hadd_ps(hsum4, hsum4);
740
741 perm1 = _mm256_permute2f128_ps(hsum5, hsum5, 0x23);
742 perm2 = _mm256_permute2f128_ps(hsum6, hsum6, 0x23);
743 perm3 = _mm256_permute2f128_ps(hsum7, hsum7, 0x23);
744 perm4 = _mm256_permute2f128_ps(hsum8, hsum8, 0x23);
745
746 sum1 = _mm256_add_ps(perm1, hsum5);
747 sum2 = _mm256_add_ps(perm2, hsum6);
748 sum3 = _mm256_add_ps(perm3, hsum7);
749 sum4 = _mm256_add_ps(perm4, hsum8);
750
751 blend1 = _mm256_blend_ps(sum1, sum2, 0xcc);
752 blend2 = _mm256_blend_ps(sum3, sum4, 0xcc);
753
754 final = padd(final, _mm256_blend_ps(blend1, blend2, 0xf0));
755
756 hsum1 = _mm256_hadd_ps(vecs8_0, vecs9_0);
757 hsum2 = _mm256_hadd_ps(vecs10_0, vecs11_0);
758 hsum3 = _mm256_hadd_ps(vecs12_0, vecs13_0);
759 hsum4 = _mm256_hadd_ps(vecs14_0, vecs15_0);
760
761 hsum5 = _mm256_hadd_ps(hsum1, hsum1);
762 hsum6 = _mm256_hadd_ps(hsum2, hsum2);
763 hsum7 = _mm256_hadd_ps(hsum3, hsum3);
764 hsum8 = _mm256_hadd_ps(hsum4, hsum4);
765
766 perm1 = _mm256_permute2f128_ps(hsum5, hsum5, 0x23);
767 perm2 = _mm256_permute2f128_ps(hsum6, hsum6, 0x23);
768 perm3 = _mm256_permute2f128_ps(hsum7, hsum7, 0x23);
769 perm4 = _mm256_permute2f128_ps(hsum8, hsum8, 0x23);
770
771 sum1 = _mm256_add_ps(perm1, hsum5);
772 sum2 = _mm256_add_ps(perm2, hsum6);
773 sum3 = _mm256_add_ps(perm3, hsum7);
774 sum4 = _mm256_add_ps(perm4, hsum8);
775
776 blend1 = _mm256_blend_ps(sum1, sum2, 0xcc);
777 blend2 = _mm256_blend_ps(sum3, sum4, 0xcc);
778
779 __m256 final_1 = _mm256_blend_ps(blend1, blend2, 0xf0);
780
781 hsum1 = _mm256_hadd_ps(vecs8_1, vecs9_1);
782 hsum2 = _mm256_hadd_ps(vecs10_1, vecs11_1);
783 hsum3 = _mm256_hadd_ps(vecs12_1, vecs13_1);
784 hsum4 = _mm256_hadd_ps(vecs14_1, vecs15_1);
785
786 hsum5 = _mm256_hadd_ps(hsum1, hsum1);
787 hsum6 = _mm256_hadd_ps(hsum2, hsum2);
788 hsum7 = _mm256_hadd_ps(hsum3, hsum3);
789 hsum8 = _mm256_hadd_ps(hsum4, hsum4);
790
791 perm1 = _mm256_permute2f128_ps(hsum5, hsum5, 0x23);
792 perm2 = _mm256_permute2f128_ps(hsum6, hsum6, 0x23);
793 perm3 = _mm256_permute2f128_ps(hsum7, hsum7, 0x23);
794 perm4 = _mm256_permute2f128_ps(hsum8, hsum8, 0x23);
795
796 sum1 = _mm256_add_ps(perm1, hsum5);
797 sum2 = _mm256_add_ps(perm2, hsum6);
798 sum3 = _mm256_add_ps(perm3, hsum7);
799 sum4 = _mm256_add_ps(perm4, hsum8);
800
801 blend1 = _mm256_blend_ps(sum1, sum2, 0xcc);
802 blend2 = _mm256_blend_ps(sum3, sum4, 0xcc);
803
804 final_1 = padd(final_1, _mm256_blend_ps(blend1, blend2, 0xf0));
805
806 __m512 final_output;
807
808 EIGEN_INSERT_8f_INTO_16f(final_output, final, final_1);
809 return final_output;
810}
#define EIGEN_INSERT_8f_INTO_16f(OUTPUT, INPUTA, INPUTB)
Definition PacketMath.h:680
#define EIGEN_EXTRACT_8f_FROM_16f(INPUT, OUTPUT)
Definition PacketMath.h:666

References EIGEN_EXTRACT_8f_FROM_16f, EIGEN_INSERT_8f_INTO_16f, and padd().

+ Here is the call graph for this function:

◆ preduxp< Packet1cd >() [1/2]

344{
345 return vecs[0];
346}

◆ preduxp< Packet1cd >() [2/2]

226{
227 return vecs[0];
228}

◆ preduxp< Packet2cd >()

331{
332 Packet4d t0 = _mm256_permute2f128_pd(vecs[0].v,vecs[1].v, 0 + (2<<4));
333 Packet4d t1 = _mm256_permute2f128_pd(vecs[0].v,vecs[1].v, 1 + (3<<4));
334
335 return Packet2cd(_mm256_add_pd(t0,t1));
336}

◆ preduxp< Packet2cf >() [1/4]

153{
154 Packet4f b1, b2;
155#ifdef _BIG_ENDIAN
156 b1 = vec_sld(vecs[0].v, vecs[1].v, 8);
157 b2 = vec_sld(vecs[1].v, vecs[0].v, 8);
158#else
159 b1 = vec_sld(vecs[1].v, vecs[0].v, 8);
160 b2 = vec_sld(vecs[0].v, vecs[1].v, 8);
161#endif
162 b2 = vec_sld(b2, b2, 8);
163 b2 = padd<Packet4f>(b1, b2);
164
165 return Packet2cf(b2);
166}

References padd< Packet4f >().

+ Here is the call graph for this function:

◆ preduxp< Packet2cf >() [2/4]

185{
186 Packet4f sum1, sum2, sum;
187
188 // Add the first two 64-bit float32x2_t of vecs[0]
189 sum1 = vcombine_f32(vget_low_f32(vecs[0].v), vget_low_f32(vecs[1].v));
190 sum2 = vcombine_f32(vget_high_f32(vecs[0].v), vget_high_f32(vecs[1].v));
191 sum = vaddq_f32(sum1, sum2);
192
193 return Packet2cf(sum);
194}

◆ preduxp< Packet2cf >() [3/4]

156{
157 return Packet2cf(_mm_add_ps(_mm_movelh_ps(vecs[0].v,vecs[1].v), _mm_movehl_ps(vecs[1].v,vecs[0].v)));
158}

◆ preduxp< Packet2cf >() [4/4]

230{
231 PacketBlock<Packet2cf,2> transpose;
232 transpose.packet[0] = vecs[0];
233 transpose.packet[1] = vecs[1];
234 ptranspose(transpose);
235
236 return padd<Packet2cf>(transpose.packet[0], transpose.packet[1]);
237}
EIGEN_STRONG_INLINE void ptranspose(PacketBlock< Packet2cf, 2 > &kernel)
Definition Complex.h:242
Packet packet[N]
Definition GenericPacketMath.h:540
Definition GenericPacketMath.h:539

References Eigen::internal::PacketBlock< Packet, N >::packet, padd< Packet2cf >(), and ptranspose().

+ Here is the call graph for this function:

◆ preduxp< Packet2d >() [1/2]

535{
536 return _mm_add_pd(_mm_unpacklo_pd(vecs[0], vecs[1]), _mm_unpackhi_pd(vecs[0], vecs[1]));
537}

◆ preduxp< Packet2d >() [2/2]

770{
771 Packet2d v[2], sum;
772 v[0] = padd<Packet2d>(vecs[0], reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4ui>(vecs[0]), reinterpret_cast<Packet4ui>(vecs[0]), 8)));
773 v[1] = padd<Packet2d>(vecs[1], reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4ui>(vecs[1]), reinterpret_cast<Packet4ui>(vecs[1]), 8)));
774
775 sum = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4ui>(v[0]), reinterpret_cast<Packet4ui>(v[1]), 8));
776
777 return sum;
778}

References padd< Packet2d >().

+ Here is the call graph for this function:

◆ preduxp< Packet4cf >()

144{
145 Packet8f t0 = _mm256_shuffle_ps(vecs[0].v, vecs[0].v, _MM_SHUFFLE(3, 1, 2 ,0));
146 Packet8f t1 = _mm256_shuffle_ps(vecs[1].v, vecs[1].v, _MM_SHUFFLE(3, 1, 2 ,0));
147 t0 = _mm256_hadd_ps(t0,t1);
148 Packet8f t2 = _mm256_shuffle_ps(vecs[2].v, vecs[2].v, _MM_SHUFFLE(3, 1, 2 ,0));
149 Packet8f t3 = _mm256_shuffle_ps(vecs[3].v, vecs[3].v, _MM_SHUFFLE(3, 1, 2 ,0));
150 t2 = _mm256_hadd_ps(t2,t3);
151
152 t1 = _mm256_permute2f128_ps(t0,t2, 0 + (2<<4));
153 t3 = _mm256_permute2f128_ps(t0,t2, 1 + (3<<4));
154
155 return Packet4cf(_mm256_add_ps(t1,t3));
156}

◆ preduxp< Packet4d >()

388{
389 Packet4d tmp0, tmp1;
390
391 tmp0 = _mm256_hadd_pd(vecs[0], vecs[1]);
392 tmp0 = _mm256_add_pd(tmp0, _mm256_permute2f128_pd(tmp0, tmp0, 1));
393
394 tmp1 = _mm256_hadd_pd(vecs[2], vecs[3]);
395 tmp1 = _mm256_add_pd(tmp1, _mm256_permute2f128_pd(tmp1, tmp1, 1));
396
397 return _mm256_blend_pd(tmp0, tmp1, 0xC);
398}

◆ preduxp< Packet4f >() [1/4]

562{
563 Packet4f v[4], sum[4];
564
565 // It's easier and faster to transpose then add as columns
566 // Check: http://www.freevec.org/function/matrix_4x4_transpose_floats for explanation
567 // Do the transpose, first set of moves
568 v[0] = vec_mergeh(vecs[0], vecs[2]);
569 v[1] = vec_mergel(vecs[0], vecs[2]);
570 v[2] = vec_mergeh(vecs[1], vecs[3]);
571 v[3] = vec_mergel(vecs[1], vecs[3]);
572 // Get the resulting vectors
573 sum[0] = vec_mergeh(v[0], v[2]);
574 sum[1] = vec_mergel(v[0], v[2]);
575 sum[2] = vec_mergeh(v[1], v[3]);
576 sum[3] = vec_mergel(v[1], v[3]);
577
578 // Now do the summation:
579 // Lines 0+1
580 sum[0] = sum[0] + sum[1];
581 // Lines 2+3
582 sum[1] = sum[2] + sum[3];
583 // Add the results
584 sum[0] = sum[0] + sum[1];
585
586 return sum[0];
587}

◆ preduxp< Packet4f >() [2/4]

379{
380 float32x4x2_t vtrn1, vtrn2, res1, res2;
381 Packet4f sum1, sum2, sum;
382
383 // NEON zip performs interleaving of the supplied vectors.
384 // We perform two interleaves in a row to acquire the transposed vector
385 vtrn1 = vzipq_f32(vecs[0], vecs[2]);
386 vtrn2 = vzipq_f32(vecs[1], vecs[3]);
387 res1 = vzipq_f32(vtrn1.val[0], vtrn2.val[0]);
388 res2 = vzipq_f32(vtrn1.val[1], vtrn2.val[1]);
389
390 // Do the addition of the resulting vectors
391 sum1 = vaddq_f32(res1.val[0], res1.val[1]);
392 sum2 = vaddq_f32(res2.val[0], res2.val[1]);
393 sum = vaddq_f32(sum1, sum2);
394
395 return sum;
396}

◆ preduxp< Packet4f >() [3/4]

521{
522 Packet4f tmp0, tmp1, tmp2;
523 tmp0 = _mm_unpacklo_ps(vecs[0], vecs[1]);
524 tmp1 = _mm_unpackhi_ps(vecs[0], vecs[1]);
525 tmp2 = _mm_unpackhi_ps(vecs[2], vecs[3]);
526 tmp0 = _mm_add_ps(tmp0, tmp1);
527 tmp1 = _mm_unpacklo_ps(vecs[2], vecs[3]);
528 tmp1 = _mm_add_ps(tmp1, tmp2);
529 tmp2 = _mm_movehl_ps(tmp1, tmp0);
530 tmp0 = _mm_movelh_ps(tmp0, tmp1);
531 return _mm_add_ps(tmp0, tmp2);
532}

◆ preduxp< Packet4f >() [4/4]

781{
782 PacketBlock<Packet4f,4> transpose;
783 transpose.packet[0] = vecs[0];
784 transpose.packet[1] = vecs[1];
785 transpose.packet[2] = vecs[2];
786 transpose.packet[3] = vecs[3];
787 ptranspose(transpose);
788
789 Packet4f sum = padd(transpose.packet[0], transpose.packet[1]);
790 sum = padd(sum, transpose.packet[2]);
791 sum = padd(sum, transpose.packet[3]);
792 return sum;
793}

References Eigen::internal::PacketBlock< Packet, N >::packet, padd(), and ptranspose().

+ Here is the call graph for this function:

◆ preduxp< Packet4i >() [1/4]

602{
603 Packet4i v[4], sum[4];
604
605 // It's easier and faster to transpose then add as columns
606 // Check: http://www.freevec.org/function/matrix_4x4_transpose_floats for explanation
607 // Do the transpose, first set of moves
608 v[0] = vec_mergeh(vecs[0], vecs[2]);
609 v[1] = vec_mergel(vecs[0], vecs[2]);
610 v[2] = vec_mergeh(vecs[1], vecs[3]);
611 v[3] = vec_mergel(vecs[1], vecs[3]);
612 // Get the resulting vectors
613 sum[0] = vec_mergeh(v[0], v[2]);
614 sum[1] = vec_mergel(v[0], v[2]);
615 sum[2] = vec_mergeh(v[1], v[3]);
616 sum[3] = vec_mergel(v[1], v[3]);
617
618 // Now do the summation:
619 // Lines 0+1
620 sum[0] = sum[0] + sum[1];
621 // Lines 2+3
622 sum[1] = sum[2] + sum[3];
623 // Add the results
624 sum[0] = sum[0] + sum[1];
625
626 return sum[0];
627}

◆ preduxp< Packet4i >() [2/4]

410{
411 int32x4x2_t vtrn1, vtrn2, res1, res2;
412 Packet4i sum1, sum2, sum;
413
414 // NEON zip performs interleaving of the supplied vectors.
415 // We perform two interleaves in a row to acquire the transposed vector
416 vtrn1 = vzipq_s32(vecs[0], vecs[2]);
417 vtrn2 = vzipq_s32(vecs[1], vecs[3]);
418 res1 = vzipq_s32(vtrn1.val[0], vtrn2.val[0]);
419 res2 = vzipq_s32(vtrn1.val[1], vtrn2.val[1]);
420
421 // Do the addition of the resulting vectors
422 sum1 = vaddq_s32(res1.val[0], res1.val[1]);
423 sum2 = vaddq_s32(res2.val[0], res2.val[1]);
424 sum = vaddq_s32(sum1, sum2);
425
426 return sum;
427}

◆ preduxp< Packet4i >() [3/4]

582{
583 Packet4i tmp0, tmp1, tmp2;
584 tmp0 = _mm_unpacklo_epi32(vecs[0], vecs[1]);
585 tmp1 = _mm_unpackhi_epi32(vecs[0], vecs[1]);
586 tmp2 = _mm_unpackhi_epi32(vecs[2], vecs[3]);
587 tmp0 = _mm_add_epi32(tmp0, tmp1);
588 tmp1 = _mm_unpacklo_epi32(vecs[2], vecs[3]);
589 tmp1 = _mm_add_epi32(tmp1, tmp2);
590 tmp2 = _mm_unpacklo_epi64(tmp0, tmp1);
591 tmp0 = _mm_unpackhi_epi64(tmp0, tmp1);
592 return _mm_add_epi32(tmp0, tmp2);
593}

◆ preduxp< Packet4i >() [4/4]

742{
743 Packet4i v[4], sum[4];
744
745 // It's easier and faster to transpose then add as columns
746 // Check: http://www.freevec.org/function/matrix_4x4_transpose_floats for explanation
747 // Do the transpose, first set of moves
748 v[0] = vec_mergeh(vecs[0], vecs[2]);
749 v[1] = vec_mergel(vecs[0], vecs[2]);
750 v[2] = vec_mergeh(vecs[1], vecs[3]);
751 v[3] = vec_mergel(vecs[1], vecs[3]);
752 // Get the resulting vectors
753 sum[0] = vec_mergeh(v[0], v[2]);
754 sum[1] = vec_mergel(v[0], v[2]);
755 sum[2] = vec_mergeh(v[1], v[3]);
756 sum[3] = vec_mergel(v[1], v[3]);
757
758 // Now do the summation:
759 // Lines 0+1
760 sum[0] = padd<Packet4i>(sum[0], sum[1]);
761 // Lines 2+3
762 sum[1] = padd<Packet4i>(sum[2], sum[3]);
763 // Add the results
764 sum[0] = padd<Packet4i>(sum[0], sum[1]);
765
766 return sum[0];
767}

References padd< Packet4i >().

+ Here is the call graph for this function:

◆ preduxp< Packet8d >()

813{
814 Packet4d vecs0_0 = _mm512_extractf64x4_pd(vecs[0], 0);
815 Packet4d vecs0_1 = _mm512_extractf64x4_pd(vecs[0], 1);
816
817 Packet4d vecs1_0 = _mm512_extractf64x4_pd(vecs[1], 0);
818 Packet4d vecs1_1 = _mm512_extractf64x4_pd(vecs[1], 1);
819
820 Packet4d vecs2_0 = _mm512_extractf64x4_pd(vecs[2], 0);
821 Packet4d vecs2_1 = _mm512_extractf64x4_pd(vecs[2], 1);
822
823 Packet4d vecs3_0 = _mm512_extractf64x4_pd(vecs[3], 0);
824 Packet4d vecs3_1 = _mm512_extractf64x4_pd(vecs[3], 1);
825
826 Packet4d vecs4_0 = _mm512_extractf64x4_pd(vecs[4], 0);
827 Packet4d vecs4_1 = _mm512_extractf64x4_pd(vecs[4], 1);
828
829 Packet4d vecs5_0 = _mm512_extractf64x4_pd(vecs[5], 0);
830 Packet4d vecs5_1 = _mm512_extractf64x4_pd(vecs[5], 1);
831
832 Packet4d vecs6_0 = _mm512_extractf64x4_pd(vecs[6], 0);
833 Packet4d vecs6_1 = _mm512_extractf64x4_pd(vecs[6], 1);
834
835 Packet4d vecs7_0 = _mm512_extractf64x4_pd(vecs[7], 0);
836 Packet4d vecs7_1 = _mm512_extractf64x4_pd(vecs[7], 1);
837
838 Packet4d tmp0, tmp1;
839
840 tmp0 = _mm256_hadd_pd(vecs0_0, vecs1_0);
841 tmp0 = _mm256_add_pd(tmp0, _mm256_permute2f128_pd(tmp0, tmp0, 1));
842
843 tmp1 = _mm256_hadd_pd(vecs2_0, vecs3_0);
844 tmp1 = _mm256_add_pd(tmp1, _mm256_permute2f128_pd(tmp1, tmp1, 1));
845
846 __m256d final_0 = _mm256_blend_pd(tmp0, tmp1, 0xC);
847
848 tmp0 = _mm256_hadd_pd(vecs0_1, vecs1_1);
849 tmp0 = _mm256_add_pd(tmp0, _mm256_permute2f128_pd(tmp0, tmp0, 1));
850
851 tmp1 = _mm256_hadd_pd(vecs2_1, vecs3_1);
852 tmp1 = _mm256_add_pd(tmp1, _mm256_permute2f128_pd(tmp1, tmp1, 1));
853
854 final_0 = padd(final_0, _mm256_blend_pd(tmp0, tmp1, 0xC));
855
856 tmp0 = _mm256_hadd_pd(vecs4_0, vecs5_0);
857 tmp0 = _mm256_add_pd(tmp0, _mm256_permute2f128_pd(tmp0, tmp0, 1));
858
859 tmp1 = _mm256_hadd_pd(vecs6_0, vecs7_0);
860 tmp1 = _mm256_add_pd(tmp1, _mm256_permute2f128_pd(tmp1, tmp1, 1));
861
862 __m256d final_1 = _mm256_blend_pd(tmp0, tmp1, 0xC);
863
864 tmp0 = _mm256_hadd_pd(vecs4_1, vecs5_1);
865 tmp0 = _mm256_add_pd(tmp0, _mm256_permute2f128_pd(tmp0, tmp0, 1));
866
867 tmp1 = _mm256_hadd_pd(vecs6_1, vecs7_1);
868 tmp1 = _mm256_add_pd(tmp1, _mm256_permute2f128_pd(tmp1, tmp1, 1));
869
870 final_1 = padd(final_1, _mm256_blend_pd(tmp0, tmp1, 0xC));
871
872 __m512d final_output = _mm512_insertf64x4(final_output, final_0, 0);
873
874 return _mm512_insertf64x4(final_output, final_1, 1);
875}

References padd().

+ Here is the call graph for this function:

◆ preduxp< Packet8f >()

360{
361 __m256 hsum1 = _mm256_hadd_ps(vecs[0], vecs[1]);
362 __m256 hsum2 = _mm256_hadd_ps(vecs[2], vecs[3]);
363 __m256 hsum3 = _mm256_hadd_ps(vecs[4], vecs[5]);
364 __m256 hsum4 = _mm256_hadd_ps(vecs[6], vecs[7]);
365
366 __m256 hsum5 = _mm256_hadd_ps(hsum1, hsum1);
367 __m256 hsum6 = _mm256_hadd_ps(hsum2, hsum2);
368 __m256 hsum7 = _mm256_hadd_ps(hsum3, hsum3);
369 __m256 hsum8 = _mm256_hadd_ps(hsum4, hsum4);
370
371 __m256 perm1 = _mm256_permute2f128_ps(hsum5, hsum5, 0x23);
372 __m256 perm2 = _mm256_permute2f128_ps(hsum6, hsum6, 0x23);
373 __m256 perm3 = _mm256_permute2f128_ps(hsum7, hsum7, 0x23);
374 __m256 perm4 = _mm256_permute2f128_ps(hsum8, hsum8, 0x23);
375
376 __m256 sum1 = _mm256_add_ps(perm1, hsum5);
377 __m256 sum2 = _mm256_add_ps(perm2, hsum6);
378 __m256 sum3 = _mm256_add_ps(perm3, hsum7);
379 __m256 sum4 = _mm256_add_ps(perm4, hsum8);
380
381 __m256 blend1 = _mm256_blend_ps(sum1, sum2, 0xcc);
382 __m256 blend2 = _mm256_blend_ps(sum3, sum4, 0xcc);
383
384 __m256 final = _mm256_blend_ps(blend1, blend2, 0xf0);
385 return final;
386}

◆ prefetch()

template<typename Scalar >
EIGEN_DEVICE_FUNC void Eigen::internal::prefetch ( const Scalar *  addr)
inline
300{
301#ifdef __CUDA_ARCH__
302#if defined(__LP64__)
303 // 64-bit pointer operand constraint for inlined asm
304 asm(" prefetch.L1 [ %1 ];" : "=l"(addr) : "l"(addr));
305#else
306 // 32-bit pointer operand constraint for inlined asm
307 asm(" prefetch.L1 [ %1 ];" : "=r"(addr) : "r"(addr));
308#endif
309#elif (!EIGEN_COMP_MSVC) && (EIGEN_COMP_GNUC || EIGEN_COMP_CLANG || EIGEN_COMP_ICC)
310 __builtin_prefetch(addr);
311#endif
312}

Referenced by Eigen::internal::gebp_kernel< LhsScalar, RhsScalar, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs >::operator()(), Eigen::internal::BlasLinearMapper< Scalar, Index, AlignmentType >::prefetch(), and sparselu_gemm().

+ Here is the caller graph for this function:

◆ prefetch< double >() [1/4]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::prefetch< double > ( const double *  addr)
313{ _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
const char * SsePrefetchPtrType
Definition PacketMath.h:415

◆ prefetch< double >() [2/4]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::prefetch< double > ( const double *  addr)
622{ _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }

◆ prefetch< double >() [3/4]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::prefetch< double > ( const double *  addr)
420{ _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }

◆ prefetch< double >() [4/4]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::prefetch< double > ( const double *  addr)
682{ EIGEN_ZVECTOR_PREFETCH(addr); }
#define EIGEN_ZVECTOR_PREFETCH(ADDR)
Definition PacketMath.h:131

References EIGEN_ZVECTOR_PREFETCH.

◆ prefetch< float >() [1/6]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::prefetch< float > ( const float *  addr)
534{ EIGEN_PPC_PREFETCH(addr); }
#define EIGEN_PPC_PREFETCH(ADDR)
Definition PacketMath.h:129

References EIGEN_PPC_PREFETCH.

◆ prefetch< float >() [2/6]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::prefetch< float > ( const float *  addr)
312{ _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }

◆ prefetch< float >() [3/6]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::prefetch< float > ( const float *  addr)
621{ _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }

◆ prefetch< float >() [4/6]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::prefetch< float > ( const float *  addr)
338{ EIGEN_ARM_PREFETCH(addr); }
#define EIGEN_ARM_PREFETCH(ADDR)
Definition PacketMath.h:98

References EIGEN_ARM_PREFETCH.

◆ prefetch< float >() [5/6]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::prefetch< float > ( const float *  addr)
419{ _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }

◆ prefetch< float >() [6/6]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::prefetch< float > ( const float *  addr)

◆ prefetch< int >() [1/5]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::prefetch< int > ( const int *  addr)
535{ EIGEN_PPC_PREFETCH(addr); }

References EIGEN_PPC_PREFETCH.

◆ prefetch< int >() [2/5]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::prefetch< int > ( const int *  addr)
314{ _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }

◆ prefetch< int >() [3/5]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::prefetch< int > ( const int *  addr)
623{ _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }

◆ prefetch< int >() [4/5]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::prefetch< int > ( const int *  addr)
421{ _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }

◆ prefetch< int >() [5/5]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::prefetch< int > ( const int *  addr)

◆ prefetch< int32_t >()

◆ prefetch< std::complex< double > >() [1/2]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::prefetch< std::complex< double > > ( const std::complex< double > *  addr)
327{ _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }

◆ prefetch< std::complex< double > >() [2/2]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::prefetch< std::complex< double > > ( const std::complex< double > *  addr)

◆ prefetch< std::complex< float > >() [1/4]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::prefetch< std::complex< float > > ( const std::complex< float > *  addr)
127{ EIGEN_PPC_PREFETCH(addr); }

References EIGEN_PPC_PREFETCH.

◆ prefetch< std::complex< float > >() [2/4]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::prefetch< std::complex< float > > ( const std::complex< float > *  addr)
145{ EIGEN_ARM_PREFETCH((const float *)addr); }

References EIGEN_ARM_PREFETCH.

◆ prefetch< std::complex< float > >() [3/4]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::prefetch< std::complex< float > > ( const std::complex< float > *  addr)
131{ _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }

◆ prefetch< std::complex< float > >() [4/4]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::prefetch< std::complex< float > > ( const std::complex< float > *  addr)

◆ preverse() [1/23]

template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::preverse ( const Packet a)
inline
349{ return a; }

◆ preverse() [2/23]

template<>
EIGEN_STRONG_INLINE Packet16f Eigen::internal::preverse ( const Packet16f a)
639{
640 return _mm512_permutexvar_ps(_mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15), a);
641}

◆ preverse() [3/23]

template<>
EIGEN_STRONG_INLINE Packet1cd Eigen::internal::preverse ( const Packet1cd a)
336{ return a; }

◆ preverse() [4/23]

template<>
EIGEN_STRONG_INLINE Packet1cd Eigen::internal::preverse ( const Packet1cd a)
204{ return a; }

◆ preverse() [5/23]

template<>
EIGEN_STRONG_INLINE Packet2cd Eigen::internal::preverse ( const Packet2cd a)
319 {
320 __m256d result = _mm256_permute2f128_pd(a.v, a.v, 1);
321 return Packet2cd(result);
322}

◆ preverse() [6/23]

template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::preverse ( const Packet2cf a)
138{
139 Packet4f rev_a;
140 rev_a = vec_perm(a.v, a.v, p16uc_COMPLEX32_REV2);
141 return Packet2cf(rev_a);
142}

References p16uc_COMPLEX32_REV2.

Referenced by Eigen::internal::unary_evaluator< Reverse< ArgType, Direction > >::packet(), pcplxflip(), preverse(), Eigen::internal::reverse_packet_cond< PacketType, ReversePacket >::run(), Eigen::internal::quat_product< Architecture::SSE, Derived, OtherDerived, double >::run(), and Eigen::internal::unary_evaluator< Reverse< ArgType, Direction > >::writePacket().

+ Here is the caller graph for this function:

◆ preverse() [7/23]

template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::preverse ( const Packet2cf a)
155{
156 float32x2_t a_lo, a_hi;
157 Packet4f a_r128;
158
159 a_lo = vget_low_f32(a.v);
160 a_hi = vget_high_f32(a.v);
161 a_r128 = vcombine_f32(a_hi, a_lo);
162
163 return Packet2cf(a_r128);
164}

◆ preverse() [8/23]

template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::preverse ( const Packet2cf a)
148{ return Packet2cf(_mm_castpd_ps(preverse(Packet2d(_mm_castps_pd(a.v))))); }

References preverse().

+ Here is the call graph for this function:

◆ preverse() [9/23]

template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::preverse ( const Packet2cf a)
206{
207 Packet2cf res;
208 res.cd[0] = a.cd[1];
209 res.cd[1] = a.cd[0];
210 return res;
211}

◆ preverse() [10/23]

template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::preverse ( const Packet2d a)
444{ return _mm_shuffle_pd(a,a,0x1); }

◆ preverse() [11/23]

template<>
EIGEN_STRONG_INLINE Packet2d Eigen::internal::preverse ( const Packet2d a)
694{
695 return reinterpret_cast<Packet2d>(vec_perm(reinterpret_cast<Packet16uc>(a), reinterpret_cast<Packet16uc>(a), p16uc_REVERSE64));
696}

References p16uc_REVERSE64.

◆ preverse() [12/23]

template<>
EIGEN_STRONG_INLINE Packet4cf Eigen::internal::preverse ( const Packet4cf a)
124 {
125 __m128 low = _mm256_extractf128_ps(a.v, 0);
126 __m128 high = _mm256_extractf128_ps(a.v, 1);
127 __m128d lowd = _mm_castps_pd(low);
128 __m128d highd = _mm_castps_pd(high);
129 low = _mm_castpd_ps(_mm_shuffle_pd(lowd,lowd,0x1));
130 high = _mm_castpd_ps(_mm_shuffle_pd(highd,highd,0x1));
131 __m256 result = _mm256_setzero_ps();
132 result = _mm256_insertf128_ps(result, low, 1);
133 result = _mm256_insertf128_ps(result, high, 0);
134 return Packet4cf(result);
135}

◆ preverse() [13/23]

template<>
EIGEN_STRONG_INLINE Packet4d Eigen::internal::preverse ( const Packet4d a)
334{
335 __m256d tmp = _mm256_shuffle_pd(a,a,5);
336 return _mm256_permute2f128_pd(tmp, tmp, 1);
337 #if 0
338 // This version is unlikely to be faster as _mm256_shuffle_ps and _mm256_permute_pd
339 // exhibit the same latency/throughput, but it is here for future reference/benchmarking...
340 __m256d swap_halves = _mm256_permute2f128_pd(a,a,1);
341 return _mm256_permute_pd(swap_halves,5);
342 #endif
343}

◆ preverse() [14/23]

template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::preverse ( const Packet4f a)
541{
542 return reinterpret_cast<Packet4f>(vec_perm(reinterpret_cast<Packet16uc>(a), reinterpret_cast<Packet16uc>(a), p16uc_REVERSE32));
543}

References p16uc_REVERSE32.

◆ preverse() [15/23]

template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::preverse ( const Packet4f a)
345 {
346 float32x2_t a_lo, a_hi;
347 Packet4f a_r64;
348
349 a_r64 = vrev64q_f32(a);
350 a_lo = vget_low_f32(a_r64);
351 a_hi = vget_high_f32(a_r64);
352 return vcombine_f32(a_hi, a_lo);
353}

◆ preverse() [16/23]

template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::preverse ( const Packet4f a)
442{ return _mm_shuffle_ps(a,a,0x1B); }

◆ preverse() [17/23]

template<>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::preverse ( const Packet4f a)
699{
700 Packet4f rev;
701 rev.v4f[0] = preverse<Packet2d>(a.v4f[1]);
702 rev.v4f[1] = preverse<Packet2d>(a.v4f[0]);
703 return rev;
704}

References Eigen::internal::Packet4f::v4f.

◆ preverse() [18/23]

template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::preverse ( const Packet4i a)
545{
546 return reinterpret_cast<Packet4i>(vec_perm(reinterpret_cast<Packet16uc>(a), reinterpret_cast<Packet16uc>(a), p16uc_REVERSE32)); }

References p16uc_REVERSE32.

◆ preverse() [19/23]

template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::preverse ( const Packet4i a)
354 {
355 int32x2_t a_lo, a_hi;
356 Packet4i a_r64;
357
358 a_r64 = vrev64q_s32(a);
359 a_lo = vget_low_s32(a_r64);
360 a_hi = vget_high_s32(a_r64);
361 return vcombine_s32(a_hi, a_lo);
362}

◆ preverse() [20/23]

template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::preverse ( const Packet4i a)
446{ return _mm_shuffle_epi32(a,0x1B); }

◆ preverse() [21/23]

template<>
EIGEN_STRONG_INLINE Packet4i Eigen::internal::preverse ( const Packet4i a)
689{
690 return reinterpret_cast<Packet4i>(vec_perm(reinterpret_cast<Packet16uc>(a), reinterpret_cast<Packet16uc>(a), p16uc_REVERSE32));
691}

References p16uc_REVERSE32.

◆ preverse() [22/23]

template<>
EIGEN_STRONG_INLINE Packet8d Eigen::internal::preverse ( const Packet8d a)
644{
645 return _mm512_permutexvar_pd(_mm512_set_epi32(0, 0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7), a);
646}

◆ preverse() [23/23]

template<>
EIGEN_STRONG_INLINE Packet8f Eigen::internal::preverse ( const Packet8f a)
329{
330 __m256 tmp = _mm256_shuffle_ps(a,a,0x1b);
331 return _mm256_permute2f128_ps(tmp, tmp, 1);
332}

◆ print_matrix()

template<typename Derived >
std::ostream & Eigen::internal::print_matrix ( std::ostream &  s,
const Derived &  _m,
const IOFormat fmt 
)
130{
131 if(_m.size() == 0)
132 {
133 s << fmt.matPrefix << fmt.matSuffix;
134 return s;
135 }
136
137 typename Derived::Nested m = _m;
138 typedef typename Derived::Scalar Scalar;
139
140 Index width = 0;
141
142 std::streamsize explicit_precision;
143 if(fmt.precision == StreamPrecision)
144 {
145 explicit_precision = 0;
146 }
147 else if(fmt.precision == FullPrecision)
148 {
149 if (NumTraits<Scalar>::IsInteger)
150 {
151 explicit_precision = 0;
152 }
153 else
154 {
155 explicit_precision = significant_decimals_impl<Scalar>::run();
156 }
157 }
158 else
159 {
160 explicit_precision = fmt.precision;
161 }
162
163 std::streamsize old_precision = 0;
164 if(explicit_precision) old_precision = s.precision(explicit_precision);
165
166 bool align_cols = !(fmt.flags & DontAlignCols);
167 if(align_cols)
168 {
169 // compute the largest width
170 for(Index j = 0; j < m.cols(); ++j)
171 for(Index i = 0; i < m.rows(); ++i)
172 {
173 std::stringstream sstr;
174 sstr.copyfmt(s);
175 sstr << m.coeff(i,j);
176 width = std::max<Index>(width, Index(sstr.str().length()));
177 }
178 }
179 s << fmt.matPrefix;
180 for(Index i = 0; i < m.rows(); ++i)
181 {
182 if (i)
183 s << fmt.rowSpacer;
184 s << fmt.rowPrefix;
185 if(width) s.width(width);
186 s << m.coeff(i, 0);
187 for(Index j = 1; j < m.cols(); ++j)
188 {
189 s << fmt.coeffSeparator;
190 if (width) s.width(width);
191 s << m.coeff(i, j);
192 }
193 s << fmt.rowSuffix;
194 if( i < m.rows() - 1)
195 s << fmt.rowSeparator;
196 }
197 s << fmt.matSuffix;
198 if(explicit_precision) s.precision(old_precision);
199 return s;
200}
@ DontAlignCols
Definition IO.h:16
coord_t width(const BoundingBox &box)
Definition Arrange.cpp:539
std::string rowPrefix
Definition IO.h:72
std::string rowSuffix
Definition IO.h:72
std::string coeffSeparator
Definition IO.h:73
std::string rowSpacer
Definition IO.h:72
std::string matPrefix
Definition IO.h:71
std::string rowSeparator
Definition IO.h:72
std::string matSuffix
Definition IO.h:71
int precision
Definition IO.h:74
int flags
Definition IO.h:75

References Eigen::IOFormat::coeffSeparator, Eigen::DontAlignCols, Eigen::IOFormat::flags, Eigen::FullPrecision, Eigen::IOFormat::matPrefix, Eigen::IOFormat::matSuffix, Eigen::IOFormat::precision, Eigen::IOFormat::rowPrefix, Eigen::IOFormat::rowSeparator, Eigen::IOFormat::rowSpacer, Eigen::IOFormat::rowSuffix, Eigen::internal::significant_decimals_impl< Scalar >::run(), and Eigen::StreamPrecision.

Referenced by Eigen::DenseBase< Derived >::operator<<().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ pround()

template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::pround ( const Packet a)
428{ using numext::round; return round(a); }
EIGEN_DEVICE_FUNC const RoundReturnType round() const
Definition ArrayCwiseUnaryOps.h:374

References round().

+ Here is the call graph for this function:

◆ pround< Packet2d >()

633{ return vec_round(a); }

◆ pround< Packet4d >()

194{ return _mm256_round_pd(a, _MM_FROUND_CUR_DIRECTION); }

◆ pround< Packet4f >() [1/2]

427{ return vec_round(a); }

◆ pround< Packet4f >() [2/2]

627{
628 Packet4f res;
629 res.v4f[0] = vec_round(a.v4f[0]);
630 res.v4f[1] = vec_round(a.v4f[1]);
631 return res;
632}

References Eigen::internal::Packet4f::v4f.

◆ pround< Packet8f >()

193{ return _mm256_round_ps(a, _MM_FROUND_CUR_DIRECTION); }

◆ prsqrt()

template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::prsqrt ( const Packet a)
422 {
423 return pdiv(pset1<Packet>(1), psqrt(a));
424}
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet psqrt(const Packet &a)
Definition GenericPacketMath.h:418

References pdiv(), and psqrt().

+ Here is the call graph for this function:

◆ prsqrt< Packet2d >() [1/2]

522 {
523 // Unfortunately we can't use the much faster mm_rsqrt_pd since it only provides an approximation.
524 return _mm_div_pd(pset1<Packet2d>(1.0), _mm_sqrt_pd(x));
525}

References pset1< Packet2d >().

Referenced by prsqrt< Packet4f >().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ prsqrt< Packet2d >() [2/2]

120 {
121 // Unfortunately we can't use the much faster mm_rsqrt_pd since it only provides an approximation.
122 return pset1<Packet2d>(1.0) / psqrt<Packet2d>(x);
123}

References pset1< Packet2d >(), and psqrt< Packet2d >().

+ Here is the call graph for this function:

◆ prsqrt< Packet4d >()

429 {
431 return _mm256_div_pd(p4d_one, _mm256_sqrt_pd(x));
432}

References _EIGEN_DECLARE_CONST_Packet4d.

◆ prsqrt< Packet4f >() [1/3]

◆ prsqrt< Packet4f >() [2/3]

514 {
515 // Unfortunately we can't use the much faster mm_rsqrt_ps since it only provides an approximation.
516 return _mm_div_ps(pset1<Packet4f>(1.0f), _mm_sqrt_ps(x));
517}

References pset1< Packet4f >().

+ Here is the call graph for this function:

◆ prsqrt< Packet4f >() [3/3]

126 {
127 Packet4f res;
128 res.v4f[0] = prsqrt<Packet2d>(x.v4f[0]);
129 res.v4f[1] = prsqrt<Packet2d>(x.v4f[1]);
130 return res;
131}

References prsqrt< Packet2d >(), and Eigen::internal::Packet4f::v4f.

+ Here is the call graph for this function:

◆ prsqrt< Packet8f >()

422 {
424 return _mm256_div_ps(p8f_one, _mm256_sqrt_ps(x));
425}

References _EIGEN_DECLARE_CONST_Packet8f.

◆ pscatter()

template<typename Scalar , typename Packet >
EIGEN_DEVICE_FUNC void Eigen::internal::pscatter ( Scalar *  to,
const Packet from,
Index   
)
inline
296 { pstore(to, from); }

References pstore().

+ Here is the call graph for this function:

◆ pscatter< double, Packet2d >() [1/2]

template<>
EIGEN_DEVICE_FUNC void Eigen::internal::pscatter< double, Packet2d > ( double *  to,
const Packet2d from,
Index  stride 
)
inline
387{
388 to[stride*0] = _mm_cvtsd_f64(from);
389 to[stride*1] = _mm_cvtsd_f64(_mm_shuffle_pd(from, from, 1));
390}

◆ pscatter< double, Packet2d >() [2/2]

template<>
EIGEN_DEVICE_FUNC void Eigen::internal::pscatter< double, Packet2d > ( double *  to,
const Packet2d from,
Index  stride 
)
inline
491{
492 double EIGEN_ALIGN16 af[2];
493 pstore<double>(af, from);
494 to[0*stride] = af[0];
495 to[1*stride] = af[1];
496}

References EIGEN_ALIGN16, and pstore< double >().

+ Here is the call graph for this function:

◆ pscatter< double, Packet4d >()

template<>
EIGEN_DEVICE_FUNC void Eigen::internal::pscatter< double, Packet4d > ( double *  to,
const Packet4d from,
Index  stride 
)
inline
286{
287 __m128d low = _mm256_extractf128_pd(from, 0);
288 to[stride*0] = _mm_cvtsd_f64(low);
289 to[stride*1] = _mm_cvtsd_f64(_mm_shuffle_pd(low, low, 1));
290 __m128d high = _mm256_extractf128_pd(from, 1);
291 to[stride*2] = _mm_cvtsd_f64(high);
292 to[stride*3] = _mm_cvtsd_f64(_mm_shuffle_pd(high, high, 1));
293}

◆ pscatter< double, Packet8d >()

template<>
EIGEN_DEVICE_FUNC void Eigen::internal::pscatter< double, Packet8d > ( double *  to,
const Packet8d from,
Index  stride 
)
inline
598 {
599 Packet8i stride_vector = _mm256_set1_epi32(stride);
600 Packet8i stride_multiplier = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
601 Packet8i indices = _mm256_mullo_epi32(stride_vector, stride_multiplier);
602 _mm512_i32scatter_pd(to, indices, from, 8);
603}

◆ pscatter< float, Packet16f >()

template<>
EIGEN_DEVICE_FUNC void Eigen::internal::pscatter< float, Packet16f > ( float *  to,
const Packet16f from,
Index  stride 
)
inline
588 {
589 Packet16i stride_vector = _mm512_set1_epi32(stride);
590 Packet16i stride_multiplier =
591 _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
592 Packet16i indices = _mm512_mullo_epi32(stride_vector, stride_multiplier);
593 _mm512_i32scatter_ps(to, indices, from, 4);
594}

◆ pscatter< float, Packet4f >() [1/4]

template<>
EIGEN_DEVICE_FUNC void Eigen::internal::pscatter< float, Packet4f > ( float *  to,
const Packet4f from,
Index  stride 
)
inline
328{
329 float EIGEN_ALIGN16 af[4];
330 pstore<float>(af, from);
331 to[0*stride] = af[0];
332 to[1*stride] = af[1];
333 to[2*stride] = af[2];
334 to[3*stride] = af[3];
335}

References EIGEN_ALIGN16, and pstore< float >().

+ Here is the call graph for this function:

◆ pscatter< float, Packet4f >() [2/4]

template<>
EIGEN_DEVICE_FUNC void Eigen::internal::pscatter< float, Packet4f > ( float *  to,
const Packet4f from,
Index  stride 
)
inline
324{
325 to[stride*0] = vgetq_lane_f32(from, 0);
326 to[stride*1] = vgetq_lane_f32(from, 1);
327 to[stride*2] = vgetq_lane_f32(from, 2);
328 to[stride*3] = vgetq_lane_f32(from, 3);
329}

◆ pscatter< float, Packet4f >() [3/4]

template<>
EIGEN_DEVICE_FUNC void Eigen::internal::pscatter< float, Packet4f > ( float *  to,
const Packet4f from,
Index  stride 
)
inline
380{
381 to[stride*0] = _mm_cvtss_f32(from);
382 to[stride*1] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 1));
383 to[stride*2] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 2));
384 to[stride*3] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 3));
385}

◆ pscatter< float, Packet4f >() [4/4]

template<>
EIGEN_DEVICE_FUNC void Eigen::internal::pscatter< float, Packet4f > ( float *  to,
const Packet4f from,
Index  stride 
)
inline
481{
482 float EIGEN_ALIGN16 ai[4];
483 pstore<float>((float *)ai, from);
484 to[0*stride] = ai[0];
485 to[1*stride] = ai[1];
486 to[2*stride] = ai[2];
487 to[3*stride] = ai[3];
488}

References EIGEN_ALIGN16, and pstore< float >().

+ Here is the call graph for this function:

◆ pscatter< float, Packet8f >()

template<>
EIGEN_DEVICE_FUNC void Eigen::internal::pscatter< float, Packet8f > ( float *  to,
const Packet8f from,
Index  stride 
)
inline
272{
273 __m128 low = _mm256_extractf128_ps(from, 0);
274 to[stride*0] = _mm_cvtss_f32(low);
275 to[stride*1] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 1));
276 to[stride*2] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 2));
277 to[stride*3] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 3));
278
279 __m128 high = _mm256_extractf128_ps(from, 1);
280 to[stride*4] = _mm_cvtss_f32(high);
281 to[stride*5] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 1));
282 to[stride*6] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 2));
283 to[stride*7] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 3));
284}

◆ pscatter< int, Packet4i >() [1/3]

template<>
EIGEN_DEVICE_FUNC void Eigen::internal::pscatter< int, Packet4i > ( int *  to,
const Packet4i from,
Index  stride 
)
inline
337{
338 int EIGEN_ALIGN16 ai[4];
339 pstore<int>((int *)ai, from);
340 to[0*stride] = ai[0];
341 to[1*stride] = ai[1];
342 to[2*stride] = ai[2];
343 to[3*stride] = ai[3];
344}

References EIGEN_ALIGN16, and pstore< int >().

+ Here is the call graph for this function:

◆ pscatter< int, Packet4i >() [2/3]

template<>
EIGEN_DEVICE_FUNC void Eigen::internal::pscatter< int, Packet4i > ( int *  to,
const Packet4i from,
Index  stride 
)
inline
392{
393 to[stride*0] = _mm_cvtsi128_si32(from);
394 to[stride*1] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 1));
395 to[stride*2] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 2));
396 to[stride*3] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 3));
397}

◆ pscatter< int, Packet4i >() [3/3]

template<>
EIGEN_DEVICE_FUNC void Eigen::internal::pscatter< int, Packet4i > ( int *  to,
const Packet4i from,
Index  stride 
)
inline
471{
472 int EIGEN_ALIGN16 ai[4];
473 pstore<int>((int *)ai, from);
474 to[0*stride] = ai[0];
475 to[1*stride] = ai[1];
476 to[2*stride] = ai[2];
477 to[3*stride] = ai[3];
478}

References EIGEN_ALIGN16, and pstore< int >().

+ Here is the call graph for this function:

◆ pscatter< int32_t, Packet4i >()

template<>
EIGEN_DEVICE_FUNC void Eigen::internal::pscatter< int32_t, Packet4i > ( int32_t to,
const Packet4i from,
Index  stride 
)
inline
331{
332 to[stride*0] = vgetq_lane_s32(from, 0);
333 to[stride*1] = vgetq_lane_s32(from, 1);
334 to[stride*2] = vgetq_lane_s32(from, 2);
335 to[stride*3] = vgetq_lane_s32(from, 3);
336}

◆ pscatter< std::complex< double >, Packet1cd >()

template<>
EIGEN_DEVICE_FUNC void Eigen::internal::pscatter< std::complex< double >, Packet1cd > ( std::complex< double > *  to,
const Packet1cd from,
Index stride  EIGEN_UNUSED 
)
inline
130{
131 pstore<std::complex<double> >(to, from);
132}

◆ pscatter< std::complex< double >, Packet2cd >()

template<>
EIGEN_DEVICE_FUNC void Eigen::internal::pscatter< std::complex< double >, Packet2cd > ( std::complex< double > *  to,
const Packet2cd from,
Index  stride 
)
inline
304{
305 __m128d low = _mm256_extractf128_pd(from.v, 0);
306 to[stride*0] = std::complex<double>(_mm_cvtsd_f64(low), _mm_cvtsd_f64(_mm_shuffle_pd(low, low, 1)));
307 __m128d high = _mm256_extractf128_pd(from.v, 1);
308 to[stride*1] = std::complex<double>(_mm_cvtsd_f64(high), _mm_cvtsd_f64(_mm_shuffle_pd(high, high, 1)));
309}

◆ pscatter< std::complex< float >, Packet2cf >() [1/4]

template<>
EIGEN_DEVICE_FUNC void Eigen::internal::pscatter< std::complex< float >, Packet2cf > ( std::complex< float > *  to,
const Packet2cf from,
Index  stride 
)
inline
91{
92 std::complex<float> EIGEN_ALIGN16 af[2];
93 pstore<std::complex<float> >((std::complex<float> *) af, from);
94 to[0*stride] = af[0];
95 to[1*stride] = af[1];
96}

References EIGEN_ALIGN16.

◆ pscatter< std::complex< float >, Packet2cf >() [2/4]

template<>
EIGEN_DEVICE_FUNC void Eigen::internal::pscatter< std::complex< float >, Packet2cf > ( std::complex< float > *  to,
const Packet2cf from,
Index  stride 
)
inline
140{
141 to[stride*0] = std::complex<float>(vgetq_lane_f32(from.v, 0), vgetq_lane_f32(from.v, 1));
142 to[stride*1] = std::complex<float>(vgetq_lane_f32(from.v, 2), vgetq_lane_f32(from.v, 3));
143}

◆ pscatter< std::complex< float >, Packet2cf >() [3/4]

template<>
EIGEN_DEVICE_FUNC void Eigen::internal::pscatter< std::complex< float >, Packet2cf > ( std::complex< float > *  to,
const Packet2cf from,
Index  stride 
)
inline
124{
125 to[stride*0] = std::complex<float>(_mm_cvtss_f32(_mm_shuffle_ps(from.v, from.v, 0)),
126 _mm_cvtss_f32(_mm_shuffle_ps(from.v, from.v, 1)));
127 to[stride*1] = std::complex<float>(_mm_cvtss_f32(_mm_shuffle_ps(from.v, from.v, 2)),
128 _mm_cvtss_f32(_mm_shuffle_ps(from.v, from.v, 3)));
129}

◆ pscatter< std::complex< float >, Packet2cf >() [4/4]

template<>
EIGEN_DEVICE_FUNC void Eigen::internal::pscatter< std::complex< float >, Packet2cf > ( std::complex< float > *  to,
const Packet2cf from,
Index  stride 
)
inline
123{
124 std::complex<float> EIGEN_ALIGN16 af[2];
125 pstore<std::complex<float> >((std::complex<float> *) af, from);
126 to[0*stride] = af[0];
127 to[1*stride] = af[1];
128}

References EIGEN_ALIGN16.

◆ pscatter< std::complex< float >, Packet4cf >()

template<>
EIGEN_DEVICE_FUNC void Eigen::internal::pscatter< std::complex< float >, Packet4cf > ( std::complex< float > *  to,
const Packet4cf from,
Index  stride 
)
inline
104{
105 __m128 low = _mm256_extractf128_ps(from.v, 0);
106 to[stride*0] = std::complex<float>(_mm_cvtss_f32(_mm_shuffle_ps(low, low, 0)),
107 _mm_cvtss_f32(_mm_shuffle_ps(low, low, 1)));
108 to[stride*1] = std::complex<float>(_mm_cvtss_f32(_mm_shuffle_ps(low, low, 2)),
109 _mm_cvtss_f32(_mm_shuffle_ps(low, low, 3)));
110
111 __m128 high = _mm256_extractf128_ps(from.v, 1);
112 to[stride*2] = std::complex<float>(_mm_cvtss_f32(_mm_shuffle_ps(high, high, 0)),
113 _mm_cvtss_f32(_mm_shuffle_ps(high, high, 1)));
114 to[stride*3] = std::complex<float>(_mm_cvtss_f32(_mm_shuffle_ps(high, high, 2)),
115 _mm_cvtss_f32(_mm_shuffle_ps(high, high, 3)));
116
117}

◆ pset1()

template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pset1 ( const typename unpacket_traits< Packet >::type &  a)
inline
222{ return a; }

◆ pset1< Packet16f >()

117 {
118 return _mm512_set1_ps(from);
119}

Referenced by pstore1< Packet16f >().

+ Here is the caller graph for this function:

◆ pset1< Packet16i >()

125 {
126 return _mm512_set1_epi32(from);
127}

Referenced by pstore1< Packet16i >().

+ Here is the caller graph for this function:

◆ pset1< Packet1cd >() [1/2]

template<>
EIGEN_STRONG_INLINE Packet1cd Eigen::internal::pset1< Packet1cd > ( const std::complex< double > &  from)
319{ /* here we really have to use unaligned loads :( */ return ploadu<Packet1cd>(&from); }

References ploadu< Packet1cd >().

Referenced by pinsertfirst(), pinsertlast(), and ploaddup< Packet1cd >().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ pset1< Packet1cd >() [2/2]

template<>
EIGEN_STRONG_INLINE Packet1cd Eigen::internal::pset1< Packet1cd > ( const std::complex< double > &  from)
102{ /* here we really have to use unaligned loads :( */ return ploadu<Packet1cd>(&from); }

References ploadu< Packet1cd >().

+ Here is the call graph for this function:

◆ pset1< Packet2cd >()

template<>
EIGEN_STRONG_INLINE Packet2cd Eigen::internal::pset1< Packet2cd > ( const std::complex< double > &  from)
286{
287 // in case casting to a __m128d* is really not safe, then we can still fallback to this version: (much slower though)
288// return Packet2cd(_mm256_loadu2_m128d((const double*)&from,(const double*)&from));
289 return Packet2cd(_mm256_broadcast_pd((const __m128d*)(const void*)&from));
290}

Referenced by pinsertfirst(), pinsertlast(), and ploaddup< Packet2cd >().

+ Here is the caller graph for this function:

◆ pset1< Packet2cf >() [1/4]

template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::pset1< Packet2cf > ( const std::complex< float > &  from)
66{
67 Packet2cf res;
68 if((std::ptrdiff_t(&from) % 16) == 0)
69 res.v = pload<Packet4f>((const float *)&from);
70 else
71 res.v = ploadu<Packet4f>((const float *)&from);
72 res.v = vec_perm(res.v, res.v, p16uc_PSET64_HI);
73 return res;
74}

References p16uc_PSET64_HI, pload< Packet4f >(), ploadu< Packet4f >(), and Eigen::internal::Packet2cf::v.

Referenced by ploaddup< Packet2cf >().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ pset1< Packet2cf >() [2/4]

template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::pset1< Packet2cf > ( const std::complex< float > &  from)
68{
69 float32x2_t r64;
70 r64 = vld1_f32((const float *)&from);
71
72 return Packet2cf(vcombine_f32(r64, r64));
73}

◆ pset1< Packet2cf >() [3/4]

template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::pset1< Packet2cf > ( const std::complex< float > &  from)
94{
95 Packet2cf res;
96#if EIGEN_GNUC_AT_MOST(4,2)
97 // Workaround annoying "may be used uninitialized in this function" warning with gcc 4.2
98 res.v = _mm_loadl_pi(_mm_set1_ps(0.0f), reinterpret_cast<const __m64*>(&from));
99#elif EIGEN_GNUC_AT_LEAST(4,6)
100 // Suppress annoying "may be used uninitialized in this function" warning with gcc >= 4.6
101 #pragma GCC diagnostic push
102 #pragma GCC diagnostic ignored "-Wuninitialized"
103 res.v = _mm_loadl_pi(res.v, (const __m64*)&from);
104 #pragma GCC diagnostic pop
105#else
106 res.v = _mm_loadl_pi(res.v, (const __m64*)&from);
107#endif
108 return Packet2cf(_mm_movelh_ps(res.v,res.v));
109}

◆ pset1< Packet2cf >() [4/4]

template<>
EIGEN_STRONG_INLINE Packet2cf Eigen::internal::pset1< Packet2cf > ( const std::complex< float > &  from)
105{
106 Packet2cf res;
107 res.cd[0] = Packet1cd(vec_ld2f((const float *)&from));
108 res.cd[1] = res.cd[0];
109 return res;
110}

◆ pset1< Packet2d >() [1/2]

179{ return _mm_set1_pd(from); }

Referenced by pinsertfirst(), pinsertlast(), ploaddup< Packet2d >(), plset< Packet2d >(), prsqrt< Packet2d >(), pset1< Packet4f >(), and Eigen::internal::quat_product< Architecture::SSE, Derived, OtherDerived, double >::run().

+ Here is the caller graph for this function:

◆ pset1< Packet2d >() [2/2]

397 {
398 return vec_splats(from);
399}

◆ pset1< Packet4cf >()

template<>
EIGEN_STRONG_INLINE Packet4cf Eigen::internal::pset1< Packet4cf > ( const std::complex< float > &  from)
80{
81 return Packet4cf(_mm256_castpd_ps(_mm256_broadcast_sd((const double*)(const void*)&from)));
82}

Referenced by pinsertfirst(), and pinsertlast().

+ Here is the caller graph for this function:

◆ pset1< Packet4d >()

121{ return _mm256_set1_pd(from); }

Referenced by pinsertfirst(), pinsertlast(), and pstore1< Packet4d >().

+ Here is the caller graph for this function:

◆ pset1< Packet4f >() [1/4]

279 {
280 Packet4f v = {from, from, from, from};
281 return v;
282}

Referenced by pgather< float, Packet4f >(), pgather< std::complex< float >, Packet2cf >(), pinsertfirst(), pinsertlast(), plset< Packet4f >(), and prsqrt< Packet4f >().

+ Here is the caller graph for this function:

◆ pset1< Packet4f >() [2/4]

145{ return vdupq_n_f32(from); }

◆ pset1< Packet4f >() [3/4]

178{ return _mm_set_ps1(from); }

◆ pset1< Packet4f >() [4/4]

401{
402 Packet4f to;
403 to.v4f[0] = pset1<Packet2d>(static_cast<const double&>(from));
404 to.v4f[1] = to.v4f[0];
405 return to;
406}

References pset1< Packet2d >(), and Eigen::internal::Packet4f::v4f.

+ Here is the call graph for this function:

◆ pset1< Packet4i >() [1/4]

284 {
285 Packet4i v = {from, from, from, from};
286 return v;
287}

Referenced by pdiv< Packet4i >(), pgather< int32_t, Packet4i >(), plset< Packet4i >(), and plset< Packet4i >().

+ Here is the caller graph for this function:

◆ pset1< Packet4i >() [2/4]

180{ return _mm_set1_epi32(from); }

◆ pset1< Packet4i >() [3/4]

394{
395 return vec_splats(from);
396}

◆ pset1< Packet4i >() [4/4]

146{ return vdupq_n_s32(from); }

◆ pset1< Packet8d >()

121 {
122 return _mm512_set1_pd(from);
123}

Referenced by pstore1< Packet8d >().

+ Here is the caller graph for this function:

◆ pset1< Packet8f >()

120{ return _mm256_set1_ps(from); }

Referenced by pinsertfirst(), pinsertlast(), and pstore1< Packet8f >().

+ Here is the caller graph for this function:

◆ pset1< Packet8i >()

122{ return _mm256_set1_epi32(from); }

Referenced by pdiv< Packet8i >(), and pstore1< Packet8i >().

+ Here is the caller graph for this function:

◆ pshiftleft()

Packet8i Eigen::internal::pshiftleft ( Packet8i  v,
int  n 
)
inline
22{
23#ifdef EIGEN_VECTORIZE_AVX2
24 return _mm256_slli_epi32(v, n);
25#else
26 __m128i lo = _mm_slli_epi32(_mm256_extractf128_si256(v, 0), n);
27 __m128i hi = _mm_slli_epi32(_mm256_extractf128_si256(v, 1), n);
28 return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
29#endif
30}

Referenced by pexp< Packet8f >(), and psin< Packet8f >().

+ Here is the caller graph for this function:

◆ pshiftright()

Packet8f Eigen::internal::pshiftright ( Packet8f  v,
int  n 
)
inline
33{
34#ifdef EIGEN_VECTORIZE_AVX2
35 return _mm256_cvtepi32_ps(_mm256_srli_epi32(_mm256_castps_si256(v), n));
36#else
37 __m128i lo = _mm_srli_epi32(_mm256_extractf128_si256(_mm256_castps_si256(v), 0), n);
38 __m128i hi = _mm_srli_epi32(_mm256_extractf128_si256(_mm256_castps_si256(v), 1), n);
39 return _mm256_cvtepi32_ps(_mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1));
40#endif
41}

Referenced by plog< Packet8f >().

+ Here is the caller graph for this function:

◆ psin()

template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::psin ( const Packet a)
366{ using std::sin; return sin(a); }
EIGEN_DEVICE_FUNC const SinReturnType sin() const
Definition ArrayCwiseUnaryOps.h:220

References sin().

+ Here is the call graph for this function:

◆ psin< Packet4f >()

259{
260 Packet4f x = _x;
263
268
269 _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(sign_mask, 0x80000000);
270
271 _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP1,-0.78515625f);
272 _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP2, -2.4187564849853515625e-4f);
273 _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP3, -3.77489497744594108e-8f);
274 _EIGEN_DECLARE_CONST_Packet4f(sincof_p0, -1.9515295891E-4f);
275 _EIGEN_DECLARE_CONST_Packet4f(sincof_p1, 8.3321608736E-3f);
276 _EIGEN_DECLARE_CONST_Packet4f(sincof_p2, -1.6666654611E-1f);
277 _EIGEN_DECLARE_CONST_Packet4f(coscof_p0, 2.443315711809948E-005f);
278 _EIGEN_DECLARE_CONST_Packet4f(coscof_p1, -1.388731625493765E-003f);
279 _EIGEN_DECLARE_CONST_Packet4f(coscof_p2, 4.166664568298827E-002f);
280 _EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4 / M_PI
281
282 Packet4f xmm1, xmm2, xmm3, sign_bit, y;
283
284 Packet4i emm0, emm2;
285 sign_bit = x;
286 /* take the absolute value */
287 x = pabs(x);
288
289 /* take the modulo */
290
291 /* extract the sign bit (upper one) */
292 sign_bit = _mm_and_ps(sign_bit, p4f_sign_mask);
293
294 /* scale by 4/Pi */
295 y = pmul(x, p4f_cephes_FOPI);
296
297 /* store the integer part of y in mm0 */
298 emm2 = _mm_cvttps_epi32(y);
299 /* j=(j+1) & (~1) (see the cephes sources) */
300 emm2 = _mm_add_epi32(emm2, p4i_1);
301 emm2 = _mm_and_si128(emm2, p4i_not1);
302 y = _mm_cvtepi32_ps(emm2);
303 /* get the swap sign flag */
304 emm0 = _mm_and_si128(emm2, p4i_4);
305 emm0 = _mm_slli_epi32(emm0, 29);
306 /* get the polynom selection mask
307 there is one polynom for 0 <= x <= Pi/4
308 and another one for Pi/4<x<=Pi/2
309
310 Both branches will be computed.
311 */
312 emm2 = _mm_and_si128(emm2, p4i_2);
313 emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
314
315 Packet4f swap_sign_bit = _mm_castsi128_ps(emm0);
316 Packet4f poly_mask = _mm_castsi128_ps(emm2);
317 sign_bit = _mm_xor_ps(sign_bit, swap_sign_bit);
318
319 /* The magic pass: "Extended precision modular arithmetic"
320 x = ((x - y * DP1) - y * DP2) - y * DP3; */
321 xmm1 = pmul(y, p4f_minus_cephes_DP1);
322 xmm2 = pmul(y, p4f_minus_cephes_DP2);
323 xmm3 = pmul(y, p4f_minus_cephes_DP3);
324 x = padd(x, xmm1);
325 x = padd(x, xmm2);
326 x = padd(x, xmm3);
327
328 /* Evaluate the first polynom (0 <= x <= Pi/4) */
329 y = p4f_coscof_p0;
330 Packet4f z = _mm_mul_ps(x,x);
331
332 y = pmadd(y, z, p4f_coscof_p1);
333 y = pmadd(y, z, p4f_coscof_p2);
334 y = pmul(y, z);
335 y = pmul(y, z);
336 Packet4f tmp = pmul(z, p4f_half);
337 y = psub(y, tmp);
338 y = padd(y, p4f_1);
339
340 /* Evaluate the second polynom (Pi/4 <= x <= 0) */
341
342 Packet4f y2 = p4f_sincof_p0;
343 y2 = pmadd(y2, z, p4f_sincof_p1);
344 y2 = pmadd(y2, z, p4f_sincof_p2);
345 y2 = pmul(y2, z);
346 y2 = pmul(y2, x);
347 y2 = padd(y2, x);
348
349 /* select the correct result from the two polynoms */
350 y2 = _mm_and_ps(poly_mask, y2);
351 y = _mm_andnot_ps(poly_mask, y);
352 y = _mm_or_ps(y,y2);
353 /* update the sign */
354 return _mm_xor_ps(y, sign_bit);
355}

References _EIGEN_DECLARE_CONST_Packet4f, _EIGEN_DECLARE_CONST_Packet4f_FROM_INT, _EIGEN_DECLARE_CONST_Packet4i, pabs(), padd(), pmadd(), pmul(), psub(), and y.

+ Here is the call graph for this function:

◆ psin< Packet8f >()

49 {
50 Packet8f x = _x;
51
52 // Some useful values.
56 _EIGEN_DECLARE_CONST_Packet8f(one_over_four, 0.25f);
57 _EIGEN_DECLARE_CONST_Packet8f(one_over_pi, 3.183098861837907e-01f);
58 _EIGEN_DECLARE_CONST_Packet8f(neg_pi_first, -3.140625000000000e+00f);
59 _EIGEN_DECLARE_CONST_Packet8f(neg_pi_second, -9.670257568359375e-04f);
60 _EIGEN_DECLARE_CONST_Packet8f(neg_pi_third, -6.278329571784980e-07f);
61 _EIGEN_DECLARE_CONST_Packet8f(four_over_pi, 1.273239544735163e+00f);
62
63 // Map x from [-Pi/4,3*Pi/4] to z in [-1,3] and subtract the shifted period.
64 Packet8f z = pmul(x, p8f_one_over_pi);
65 Packet8f shift = _mm256_floor_ps(padd(z, p8f_one_over_four));
66 x = pmadd(shift, p8f_neg_pi_first, x);
67 x = pmadd(shift, p8f_neg_pi_second, x);
68 x = pmadd(shift, p8f_neg_pi_third, x);
69 z = pmul(x, p8f_four_over_pi);
70
71 // Make a mask for the entries that need flipping, i.e. wherever the shift
72 // is odd.
73 Packet8i shift_ints = _mm256_cvtps_epi32(shift);
74 Packet8i shift_isodd = _mm256_castps_si256(_mm256_and_ps(_mm256_castsi256_ps(shift_ints), _mm256_castsi256_ps(p8i_one)));
75 Packet8i sign_flip_mask = pshiftleft(shift_isodd, 31);
76
77 // Create a mask for which interpolant to use, i.e. if z > 1, then the mask
78 // is set to ones for that entry.
79 Packet8f ival_mask = _mm256_cmp_ps(z, p8f_one, _CMP_GT_OQ);
80
81 // Evaluate the polynomial for the interval [1,3] in z.
82 _EIGEN_DECLARE_CONST_Packet8f(coeff_right_0, 9.999999724233232e-01f);
83 _EIGEN_DECLARE_CONST_Packet8f(coeff_right_2, -3.084242535619928e-01f);
84 _EIGEN_DECLARE_CONST_Packet8f(coeff_right_4, 1.584991525700324e-02f);
85 _EIGEN_DECLARE_CONST_Packet8f(coeff_right_6, -3.188805084631342e-04f);
86 Packet8f z_minus_two = psub(z, p8f_two);
87 Packet8f z_minus_two2 = pmul(z_minus_two, z_minus_two);
88 Packet8f right = pmadd(p8f_coeff_right_6, z_minus_two2, p8f_coeff_right_4);
89 right = pmadd(right, z_minus_two2, p8f_coeff_right_2);
90 right = pmadd(right, z_minus_two2, p8f_coeff_right_0);
91
92 // Evaluate the polynomial for the interval [-1,1] in z.
93 _EIGEN_DECLARE_CONST_Packet8f(coeff_left_1, 7.853981525427295e-01f);
94 _EIGEN_DECLARE_CONST_Packet8f(coeff_left_3, -8.074536727092352e-02f);
95 _EIGEN_DECLARE_CONST_Packet8f(coeff_left_5, 2.489871967827018e-03f);
96 _EIGEN_DECLARE_CONST_Packet8f(coeff_left_7, -3.587725841214251e-05f);
97 Packet8f z2 = pmul(z, z);
98 Packet8f left = pmadd(p8f_coeff_left_7, z2, p8f_coeff_left_5);
99 left = pmadd(left, z2, p8f_coeff_left_3);
100 left = pmadd(left, z2, p8f_coeff_left_1);
101 left = pmul(left, z);
102
103 // Assemble the results, i.e. select the left and right polynomials.
104 left = _mm256_andnot_ps(ival_mask, left);
105 right = _mm256_and_ps(ival_mask, right);
106 Packet8f res = _mm256_or_ps(left, right);
107
108 // Flip the sign on the odd intervals and return the result.
109 res = _mm256_xor_ps(res, _mm256_castsi256_ps(sign_flip_mask));
110 return res;
111}
#define _EIGEN_DECLARE_CONST_Packet8i(NAME, X)
Definition PacketMath.h:48

References _EIGEN_DECLARE_CONST_Packet8f, _EIGEN_DECLARE_CONST_Packet8i, padd(), pmadd(), pmul(), pshiftleft(), and psub().

+ Here is the call graph for this function:

◆ psinh()

template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::psinh ( const Packet a)
390{ using std::sinh; return sinh(a); }
EIGEN_DEVICE_FUNC const SinhReturnType sinh() const
Definition ArrayCwiseUnaryOps.h:304

References sinh().

+ Here is the call graph for this function:

◆ psqrt()

template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::psqrt ( const Packet a)
418{ using std::sqrt; return sqrt(a); }

References sqrt().

Referenced by prsqrt().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ psqrt< Packet2d >() [1/2]

479{ return _mm_sqrt_pd(x); }

Referenced by prsqrt< Packet2d >(), and psqrt< Packet4f >().

+ Here is the caller graph for this function:

◆ psqrt< Packet2d >() [2/2]

106{
107 return __builtin_s390_vfsqdb(x);
108}

◆ psqrt< Packet4d >()

◆ psqrt< Packet4f >() [1/2]

◆ psqrt< Packet4f >() [2/2]

112{
113 Packet4f res;
114 res.v4f[0] = psqrt<Packet2d>(x.v4f[0]);
115 res.v4f[1] = psqrt<Packet2d>(x.v4f[1]);
116 return res;
117}

References psqrt< Packet2d >(), and Eigen::internal::Packet4f::v4f.

+ Here is the call graph for this function:

◆ psqrt< Packet8f >()

◆ pstore()

◆ pstore1()

template<typename Packet >
void Eigen::internal::pstore1 ( typename unpacket_traits< Packet >::type *  to,
const typename unpacket_traits< Packet >::type &  a 
)
inline
446{
447 pstore(to, pset1<Packet>(a));
448}

References pstore().

+ Here is the call graph for this function:

◆ pstore1< Packet16f >()

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore1< Packet16f > ( float *  to,
const float &  a 
)
606 {
607 Packet16f pa = pset1<Packet16f>(a);
608 pstore(to, pa);
609}

References pset1< Packet16f >(), and pstore().

+ Here is the call graph for this function:

◆ pstore1< Packet16i >()

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore1< Packet16i > ( int *  to,
const int &  a 
)
616 {
617 Packet16i pa = pset1<Packet16i>(a);
618 pstore(to, pa);
619}

References pset1< Packet16i >(), and pstore().

+ Here is the call graph for this function:

◆ pstore1< Packet2d >()

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore1< Packet2d > ( double *  to,
const double &  a 
)
407{
408 Packet2d pa = _mm_set_sd(a);
409 pstore(to, Packet2d(vec2d_swizzle1(pa,0,0)));
410}

References pstore(), and vec2d_swizzle1.

+ Here is the call graph for this function:

◆ pstore1< Packet4d >()

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore1< Packet4d > ( double *  to,
const double &  a 
)
301{
302 Packet4d pa = pset1<Packet4d>(a);
303 pstore(to, pa);
304}

References pset1< Packet4d >(), and pstore().

+ Here is the call graph for this function:

◆ pstore1< Packet4f >()

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore1< Packet4f > ( float *  to,
const float &  a 
)
401{
402 Packet4f pa = _mm_set_ss(a);
403 pstore(to, Packet4f(vec4f_swizzle1(pa,0,0,0,0)));
404}

References pstore(), and vec4f_swizzle1.

+ Here is the call graph for this function:

◆ pstore1< Packet8d >()

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore1< Packet8d > ( double *  to,
const double &  a 
)
611 {
612 Packet8d pa = pset1<Packet8d>(a);
613 pstore(to, pa);
614}

References pset1< Packet8d >(), and pstore().

+ Here is the call graph for this function:

◆ pstore1< Packet8f >()

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore1< Packet8f > ( float *  to,
const float &  a 
)
296{
297 Packet8f pa = pset1<Packet8f>(a);
298 pstore(to, pa);
299}

References pset1< Packet8f >(), and pstore().

+ Here is the call graph for this function:

◆ pstore1< Packet8i >()

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore1< Packet8i > ( int *  to,
const int &  a 
)
306{
307 Packet8i pa = pset1<Packet8i>(a);
308 pstore(to, pa);
309}

References pset1< Packet8i >(), and pstore().

+ Here is the call graph for this function:

◆ pstore< double >() [1/4]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< double > ( double *  to,
const Packet2d from 
)
359{ EIGEN_DEBUG_ALIGNED_STORE _mm_store_pd(to, from); }
#define EIGEN_DEBUG_ALIGNED_STORE
Definition GenericPacketMath.h:35

References EIGEN_DEBUG_ALIGNED_STORE.

◆ pstore< double >() [2/4]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< double > ( double *  to,
const Packet2d from 
)
385{
386 // FIXME: No intrinsic yet
388 Packet *vto;
389 vto = (Packet *) to;
390 vto->v2d = from;
391}

References EIGEN_DEBUG_ALIGNED_STORE, and Eigen::internal::Packet::v2d.

◆ pstore< double >() [3/4]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< double > ( double *  to,
const Packet4d from 
)
252{ EIGEN_DEBUG_ALIGNED_STORE _mm256_store_pd(to, from); }

References EIGEN_DEBUG_ALIGNED_STORE.

Referenced by pscatter< double, Packet2d >(), and pstoreu< double >().

+ Here is the caller graph for this function:

◆ pstore< double >() [4/4]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< double > ( double *  to,
const Packet8d from 
)
542 {
543 EIGEN_DEBUG_ALIGNED_STORE _mm512_store_pd(to, from);
544}

References EIGEN_DEBUG_ALIGNED_STORE.

◆ pstore< float >() [1/6]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< float > ( float *  to,
const Packet16f from 
)
538 {
539 EIGEN_DEBUG_ALIGNED_STORE _mm512_store_ps(to, from);
540}

References EIGEN_DEBUG_ALIGNED_STORE.

◆ pstore< float >() [2/6]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< float > ( float *  to,
const Packet4f from 
)
260{
262#ifdef __VSX__
263 vec_vsx_st(from, 0, to);
264#else
265 vec_st(from, 0, to);
266#endif
267}

References EIGEN_DEBUG_ALIGNED_STORE.

Referenced by pscatter< float, Packet4f >(), and pstoreu< float >().

+ Here is the caller graph for this function:

◆ pstore< float >() [3/6]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< float > ( float *  to,
const Packet4f from 
)
298{ EIGEN_DEBUG_ALIGNED_STORE vst1q_f32(to, from); }

References EIGEN_DEBUG_ALIGNED_STORE.

◆ pstore< float >() [4/6]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< float > ( float *  to,
const Packet4f from 
)
358{ EIGEN_DEBUG_ALIGNED_STORE _mm_store_ps(to, from); }

References EIGEN_DEBUG_ALIGNED_STORE.

◆ pstore< float >() [5/6]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< float > ( float *  to,
const Packet4f from 
)
376{
377 // FIXME: No intrinsic yet
379 vec_st2f(from.v4f[0], &to[0]);
380 vec_st2f(from.v4f[1], &to[2]);
381}

References EIGEN_DEBUG_ALIGNED_STORE, and Eigen::internal::Packet4f::v4f.

◆ pstore< float >() [6/6]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< float > ( float *  to,
const Packet8f from 
)
251{ EIGEN_DEBUG_ALIGNED_STORE _mm256_store_ps(to, from); }

References EIGEN_DEBUG_ALIGNED_STORE.

◆ pstore< int >() [1/5]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< int > ( int *  to,
const Packet16i from 
)
546 {
547 EIGEN_DEBUG_ALIGNED_STORE _mm512_storeu_si512(reinterpret_cast<__m512i*>(to),
548 from);
549}

References EIGEN_DEBUG_ALIGNED_STORE.

◆ pstore< int >() [2/5]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< int > ( int *  to,
const Packet4i from 
)
270{
272#ifdef __VSX__
273 vec_vsx_st(from, 0, to);
274#else
275 vec_st(from, 0, to);
276#endif
277}

References EIGEN_DEBUG_ALIGNED_STORE.

Referenced by pscatter< int, Packet4i >(), and pstoreu< int >().

+ Here is the caller graph for this function:

◆ pstore< int >() [3/5]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< int > ( int *  to,
const Packet4i from 
)
360{ EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<__m128i*>(to), from); }

References EIGEN_DEBUG_ALIGNED_STORE.

◆ pstore< int >() [4/5]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< int > ( int *  to,
const Packet4i from 
)
367{
368 // FIXME: No intrinsic yet
370 Packet *vto;
371 vto = (Packet *) to;
372 vto->v4i = from;
373}

References EIGEN_DEBUG_ALIGNED_STORE, and Eigen::internal::Packet::v4i.

◆ pstore< int >() [5/5]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< int > ( int *  to,
const Packet8i from 
)
253{ EIGEN_DEBUG_ALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from); }

References EIGEN_DEBUG_ALIGNED_STORE.

◆ pstore< int32_t >()

299{ EIGEN_DEBUG_ALIGNED_STORE vst1q_s32(to, from); }

References EIGEN_DEBUG_ALIGNED_STORE.

◆ pstore< std::complex< double > >() [1/3]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< std::complex< double > > ( std::complex< double > *  to,
const Packet1cd from 
)
324{ EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, Packet2d(from.v)); }

References EIGEN_DEBUG_ALIGNED_STORE, and pstore().

+ Here is the call graph for this function:

◆ pstore< std::complex< double > >() [2/3]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< std::complex< double > > ( std::complex< double > *  to,
const Packet1cd from 
)
97{ EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, from.v); }

References EIGEN_DEBUG_ALIGNED_STORE, and pstore().

+ Here is the call graph for this function:

◆ pstore< std::complex< double > >() [3/3]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< std::complex< double > > ( std::complex< double > *  to,
const Packet2cd from 
)
294{ EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, from.v); }

References EIGEN_DEBUG_ALIGNED_STORE, and pstore().

+ Here is the call graph for this function:

◆ pstore< std::complex< float > >() [1/5]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< std::complex< float > > ( std::complex< float > *  to,
const Packet2cf from 
)
80{ pstore((float*)to, from.v); }

References pstore().

+ Here is the call graph for this function:

◆ pstore< std::complex< float > >() [2/5]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< std::complex< float > > ( std::complex< float > *  to,
const Packet2cf from 
)
126{ EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); }

References EIGEN_DEBUG_ALIGNED_STORE, and pstore().

+ Here is the call graph for this function:

◆ pstore< std::complex< float > >() [3/5]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< std::complex< float > > ( std::complex< float > *  to,
const Packet2cf from 
)
113{ EIGEN_DEBUG_ALIGNED_STORE pstore(&numext::real_ref(*to), Packet4f(from.v)); }

References EIGEN_DEBUG_ALIGNED_STORE, pstore(), and Eigen::numext::real_ref().

+ Here is the call graph for this function:

◆ pstore< std::complex< float > >() [4/5]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< std::complex< float > > ( std::complex< float > *  to,
const Packet2cf from 
)
96{ EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); }

References EIGEN_DEBUG_ALIGNED_STORE, and pstore().

+ Here is the call graph for this function:

◆ pstore< std::complex< float > >() [5/5]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstore< std::complex< float > > ( std::complex< float > *  to,
const Packet4cf from 
)
92{ EIGEN_DEBUG_ALIGNED_STORE pstore(&numext::real_ref(*to), from.v); }

References EIGEN_DEBUG_ALIGNED_STORE, pstore(), and Eigen::numext::real_ref().

+ Here is the call graph for this function:

◆ pstoret()

template<typename Scalar , typename Packet , int Alignment>
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void Eigen::internal::pstoret ( Scalar *  to,
const Packet from 
)
472{
474 pstore(to, from);
475 else
476 pstoreu(to, from);
477}
EIGEN_DEVICE_FUNC void pstoreu(Scalar *to, const Packet &from)
Definition GenericPacketMath.h:289

References pstore(), and pstoreu().

+ Here is the call graph for this function:

◆ pstoreu()

◆ pstoreu< double >() [1/4]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< double > ( double *  to,
const Packet2d from 
)
362{ EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_pd(to, from); }
#define EIGEN_DEBUG_UNALIGNED_STORE
Definition GenericPacketMath.h:39

References EIGEN_DEBUG_UNALIGNED_STORE.

◆ pstoreu< double >() [2/4]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< double > ( double *  to,
const Packet2d from 
)
678{ pstore<double>(to, from); }

References pstore< double >().

+ Here is the call graph for this function:

◆ pstoreu< double >() [3/4]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< double > ( double *  to,
const Packet4d from 
)
256{ EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_pd(to, from); }

References EIGEN_DEBUG_UNALIGNED_STORE.

◆ pstoreu< double >() [4/4]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< double > ( double *  to,
const Packet8d from 
)
556 {
557 EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_pd(to, from);
558}

References EIGEN_DEBUG_UNALIGNED_STORE.

◆ pstoreu< float >() [1/6]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< float > ( float *  to,
const Packet16f from 
)
552 {
553 EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_ps(to, from);
554}

References EIGEN_DEBUG_UNALIGNED_STORE.

◆ pstoreu< float >() [2/6]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< float > ( float *  to,
const Packet4f from 
)
528{
530 vec_vsx_st(from, (long)to & 15, (float*) _EIGEN_ALIGNED_PTR(to));
531}

References _EIGEN_ALIGNED_PTR, and EIGEN_DEBUG_ALIGNED_STORE.

◆ pstoreu< float >() [3/6]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< float > ( float *  to,
const Packet4f from 
)
301{ EIGEN_DEBUG_UNALIGNED_STORE vst1q_f32(to, from); }

References EIGEN_DEBUG_UNALIGNED_STORE.

◆ pstoreu< float >() [4/6]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< float > ( float *  to,
const Packet4f from 
)
363{ EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_ps(to, from); }

References EIGEN_DEBUG_UNALIGNED_STORE.

◆ pstoreu< float >() [5/6]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< float > ( float *  to,
const Packet4f from 
)
677{ pstore<float>(to, from); }

References pstore< float >().

+ Here is the call graph for this function:

◆ pstoreu< float >() [6/6]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< float > ( float *  to,
const Packet8f from 
)
255{ EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_ps(to, from); }

References EIGEN_DEBUG_UNALIGNED_STORE.

◆ pstoreu< int >() [1/5]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< int > ( int *  to,
const Packet16i from 
)
560 {
561 EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_si512(
562 reinterpret_cast<__m512i*>(to), from);
563}

References EIGEN_DEBUG_UNALIGNED_STORE.

◆ pstoreu< int >() [2/5]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< int > ( int *  to,
const Packet4i from 
)
523{
525 vec_vsx_st(from, (long)to & 15, (int*) _EIGEN_ALIGNED_PTR(to));
526}

References _EIGEN_ALIGNED_PTR, and EIGEN_DEBUG_ALIGNED_STORE.

◆ pstoreu< int >() [3/5]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< int > ( int *  to,
const Packet4i from 
)
364{ EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from); }

References EIGEN_DEBUG_UNALIGNED_STORE.

◆ pstoreu< int >() [4/5]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< int > ( int *  to,
const Packet4i from 
)
676{ pstore<int>(to, from); }

References pstore< int >().

+ Here is the call graph for this function:

◆ pstoreu< int >() [5/5]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< int > ( int *  to,
const Packet8i from 
)
257{ EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from); }

References EIGEN_DEBUG_UNALIGNED_STORE.

◆ pstoreu< int32_t >()

302{ EIGEN_DEBUG_UNALIGNED_STORE vst1q_s32(to, from); }

References EIGEN_DEBUG_UNALIGNED_STORE.

◆ pstoreu< std::complex< double > >() [1/3]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< std::complex< double > > ( std::complex< double > *  to,
const Packet1cd from 
)
325{ EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, Packet2d(from.v)); }

References EIGEN_DEBUG_UNALIGNED_STORE, and pstoreu().

+ Here is the call graph for this function:

◆ pstoreu< std::complex< double > >() [2/3]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< std::complex< double > > ( std::complex< double > *  to,
const Packet1cd from 
)
99{ EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, from.v); }

References EIGEN_DEBUG_UNALIGNED_STORE, and pstoreu().

+ Here is the call graph for this function:

◆ pstoreu< std::complex< double > >() [3/3]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< std::complex< double > > ( std::complex< double > *  to,
const Packet2cd from 
)
295{ EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, from.v); }

References EIGEN_DEBUG_UNALIGNED_STORE, and pstoreu().

+ Here is the call graph for this function:

◆ pstoreu< std::complex< float > >() [1/5]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< std::complex< float > > ( std::complex< float > *  to,
const Packet2cf from 
)
81{ pstoreu((float*)to, from.v); }

References pstoreu().

+ Here is the call graph for this function:

◆ pstoreu< std::complex< float > >() [2/5]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< std::complex< float > > ( std::complex< float > *  to,
const Packet2cf from 
)
127{ EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); }

References EIGEN_DEBUG_UNALIGNED_STORE, and pstoreu().

+ Here is the call graph for this function:

◆ pstoreu< std::complex< float > >() [3/5]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< std::complex< float > > ( std::complex< float > *  to,
const Packet2cf from 
)
114{ EIGEN_DEBUG_UNALIGNED_STORE pstoreu(&numext::real_ref(*to), Packet4f(from.v)); }

References EIGEN_DEBUG_UNALIGNED_STORE, pstoreu(), and Eigen::numext::real_ref().

+ Here is the call graph for this function:

◆ pstoreu< std::complex< float > >() [4/5]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< std::complex< float > > ( std::complex< float > *  to,
const Packet2cf from 
)
98{ EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); }

References EIGEN_DEBUG_UNALIGNED_STORE, and pstoreu().

+ Here is the call graph for this function:

◆ pstoreu< std::complex< float > >() [5/5]

template<>
EIGEN_STRONG_INLINE void Eigen::internal::pstoreu< std::complex< float > > ( std::complex< float > *  to,
const Packet4cf from 
)
93{ EIGEN_DEBUG_UNALIGNED_STORE pstoreu(&numext::real_ref(*to), from.v); }

References EIGEN_DEBUG_UNALIGNED_STORE, pstoreu(), and Eigen::numext::real_ref().

+ Here is the call graph for this function:

◆ psub()

◆ psub< Packet16f >()

164 {
165 return _mm512_sub_ps(a, b);
166}

◆ psub< Packet1cd >() [1/2]

286{ return Packet1cd(_mm_sub_pd(a.v,b.v)); }

◆ psub< Packet1cd >() [2/2]

137{ return Packet1cd(a.v - b.v); }

◆ psub< Packet2cd >()

257{ return Packet2cd(_mm256_sub_pd(a.v,b.v)); }

◆ psub< Packet2cf >() [1/4]

99{ return Packet2cf(a.v - b.v); }

◆ psub< Packet2cf >() [2/4]

76{ return Packet2cf(psub<Packet4f>(a.v,b.v)); }

References psub< Packet4f >().

+ Here is the call graph for this function:

◆ psub< Packet2cf >() [3/4]

56{ return Packet2cf(_mm_sub_ps(a.v,b.v)); }

◆ psub< Packet2cf >() [4/4]

136{ return Packet2cf(psub<Packet4f>(a.v, b.v)); }

References psub< Packet4f >().

+ Here is the call graph for this function:

◆ psub< Packet2d >() [1/2]

203{ return _mm_sub_pd(a,b); }

◆ psub< Packet2d >() [2/2]

516{ return (a - b); }

◆ psub< Packet4cf >()

51{ return Packet4cf(_mm256_sub_ps(a.v,b.v)); }

◆ psub< Packet4d >()

134{ return _mm256_sub_pd(a,b); }

◆ psub< Packet4f >() [1/4]

352{ return a - b; }

Referenced by psub< Packet2cf >().

+ Here is the caller graph for this function:

◆ psub< Packet4f >() [2/4]

164{ return vsubq_f32(a,b); }

◆ psub< Packet4f >() [3/4]

202{ return _mm_sub_ps(a,b); }

◆ psub< Packet4f >() [4/4]

510{
511 Packet4f c;
512 c.v4f[0] = a.v4f[0] - b.v4f[0];
513 c.v4f[1] = a.v4f[1] - b.v4f[1];
514 return c;
515}

◆ psub< Packet4i >() [1/4]

353{ return a - b; }

◆ psub< Packet4i >() [2/4]

165{ return vsubq_s32(a,b); }

◆ psub< Packet4i >() [3/4]

204{ return _mm_sub_epi32(a,b); }

◆ psub< Packet4i >() [4/4]

508{ return (a - b); }

◆ psub< Packet8d >()

169 {
170 return _mm512_sub_pd(a, b);
171}

◆ psub< Packet8f >()

133{ return _mm256_sub_ps(a,b); }

◆ ptan()

template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::ptan ( const Packet a)
374{ using std::tan; return tan(a); }
EIGEN_DEVICE_FUNC const TanReturnType tan() const
Definition ArrayCwiseUnaryOps.h:234

References tan().

+ Here is the call graph for this function:

◆ ptanh()

template<typename Packet >
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS Packet Eigen::internal::ptanh ( const Packet a)
398{ using std::tanh; return tanh(a); }
EIGEN_DEVICE_FUNC const TanhReturnType tanh() const
Definition ArrayCwiseUnaryOps.h:290

References tanh().

Referenced by Eigen::internal::scalar_tanh_op< Scalar >::packetOp().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ ptanh< Packet4f >()

530 {
531 return internal::generic_fast_tanh_float(x);
532}

References generic_fast_tanh_float().

+ Here is the call graph for this function:

◆ ptanh< Packet8f >()

271 {
272 return internal::generic_fast_tanh_float(x);
273}

References generic_fast_tanh_float().

+ Here is the call graph for this function:

◆ ptranspose() [1/15]

template<typename Packet >
EIGEN_DEVICE_FUNC void Eigen::internal::ptranspose ( PacketBlock< Packet, 1 > &  )
inline
544 {
545 // Nothing to do in the scalar case, i.e. a 1x1 matrix.
546}

◆ ptranspose() [2/15]

EIGEN_DEVICE_FUNC void Eigen::internal::ptranspose ( PacketBlock< Packet16f, 16 > &  kernel)
inline
1049 {
1050 __m512 T0 = _mm512_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
1051 __m512 T1 = _mm512_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
1052 __m512 T2 = _mm512_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
1053 __m512 T3 = _mm512_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
1054 __m512 T4 = _mm512_unpacklo_ps(kernel.packet[4], kernel.packet[5]);
1055 __m512 T5 = _mm512_unpackhi_ps(kernel.packet[4], kernel.packet[5]);
1056 __m512 T6 = _mm512_unpacklo_ps(kernel.packet[6], kernel.packet[7]);
1057 __m512 T7 = _mm512_unpackhi_ps(kernel.packet[6], kernel.packet[7]);
1058 __m512 T8 = _mm512_unpacklo_ps(kernel.packet[8], kernel.packet[9]);
1059 __m512 T9 = _mm512_unpackhi_ps(kernel.packet[8], kernel.packet[9]);
1060 __m512 T10 = _mm512_unpacklo_ps(kernel.packet[10], kernel.packet[11]);
1061 __m512 T11 = _mm512_unpackhi_ps(kernel.packet[10], kernel.packet[11]);
1062 __m512 T12 = _mm512_unpacklo_ps(kernel.packet[12], kernel.packet[13]);
1063 __m512 T13 = _mm512_unpackhi_ps(kernel.packet[12], kernel.packet[13]);
1064 __m512 T14 = _mm512_unpacklo_ps(kernel.packet[14], kernel.packet[15]);
1065 __m512 T15 = _mm512_unpackhi_ps(kernel.packet[14], kernel.packet[15]);
1066 __m512 S0 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(1, 0, 1, 0));
1067 __m512 S1 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(3, 2, 3, 2));
1068 __m512 S2 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(1, 0, 1, 0));
1069 __m512 S3 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(3, 2, 3, 2));
1070 __m512 S4 = _mm512_shuffle_ps(T4, T6, _MM_SHUFFLE(1, 0, 1, 0));
1071 __m512 S5 = _mm512_shuffle_ps(T4, T6, _MM_SHUFFLE(3, 2, 3, 2));
1072 __m512 S6 = _mm512_shuffle_ps(T5, T7, _MM_SHUFFLE(1, 0, 1, 0));
1073 __m512 S7 = _mm512_shuffle_ps(T5, T7, _MM_SHUFFLE(3, 2, 3, 2));
1074 __m512 S8 = _mm512_shuffle_ps(T8, T10, _MM_SHUFFLE(1, 0, 1, 0));
1075 __m512 S9 = _mm512_shuffle_ps(T8, T10, _MM_SHUFFLE(3, 2, 3, 2));
1076 __m512 S10 = _mm512_shuffle_ps(T9, T11, _MM_SHUFFLE(1, 0, 1, 0));
1077 __m512 S11 = _mm512_shuffle_ps(T9, T11, _MM_SHUFFLE(3, 2, 3, 2));
1078 __m512 S12 = _mm512_shuffle_ps(T12, T14, _MM_SHUFFLE(1, 0, 1, 0));
1079 __m512 S13 = _mm512_shuffle_ps(T12, T14, _MM_SHUFFLE(3, 2, 3, 2));
1080 __m512 S14 = _mm512_shuffle_ps(T13, T15, _MM_SHUFFLE(1, 0, 1, 0));
1081 __m512 S15 = _mm512_shuffle_ps(T13, T15, _MM_SHUFFLE(3, 2, 3, 2));
1082
1093 EIGEN_EXTRACT_8f_FROM_16f(S10, S10);
1094 EIGEN_EXTRACT_8f_FROM_16f(S11, S11);
1095 EIGEN_EXTRACT_8f_FROM_16f(S12, S12);
1096 EIGEN_EXTRACT_8f_FROM_16f(S13, S13);
1097 EIGEN_EXTRACT_8f_FROM_16f(S14, S14);
1098 EIGEN_EXTRACT_8f_FROM_16f(S15, S15);
1099
1101
1102 tmp.packet[0] = _mm256_permute2f128_ps(S0_0, S4_0, 0x20);
1103 tmp.packet[1] = _mm256_permute2f128_ps(S1_0, S5_0, 0x20);
1104 tmp.packet[2] = _mm256_permute2f128_ps(S2_0, S6_0, 0x20);
1105 tmp.packet[3] = _mm256_permute2f128_ps(S3_0, S7_0, 0x20);
1106 tmp.packet[4] = _mm256_permute2f128_ps(S0_0, S4_0, 0x31);
1107 tmp.packet[5] = _mm256_permute2f128_ps(S1_0, S5_0, 0x31);
1108 tmp.packet[6] = _mm256_permute2f128_ps(S2_0, S6_0, 0x31);
1109 tmp.packet[7] = _mm256_permute2f128_ps(S3_0, S7_0, 0x31);
1110
1111 tmp.packet[8] = _mm256_permute2f128_ps(S0_1, S4_1, 0x20);
1112 tmp.packet[9] = _mm256_permute2f128_ps(S1_1, S5_1, 0x20);
1113 tmp.packet[10] = _mm256_permute2f128_ps(S2_1, S6_1, 0x20);
1114 tmp.packet[11] = _mm256_permute2f128_ps(S3_1, S7_1, 0x20);
1115 tmp.packet[12] = _mm256_permute2f128_ps(S0_1, S4_1, 0x31);
1116 tmp.packet[13] = _mm256_permute2f128_ps(S1_1, S5_1, 0x31);
1117 tmp.packet[14] = _mm256_permute2f128_ps(S2_1, S6_1, 0x31);
1118 tmp.packet[15] = _mm256_permute2f128_ps(S3_1, S7_1, 0x31);
1119
1120 // Second set of _m256 outputs
1121 tmp.packet[16] = _mm256_permute2f128_ps(S8_0, S12_0, 0x20);
1122 tmp.packet[17] = _mm256_permute2f128_ps(S9_0, S13_0, 0x20);
1123 tmp.packet[18] = _mm256_permute2f128_ps(S10_0, S14_0, 0x20);
1124 tmp.packet[19] = _mm256_permute2f128_ps(S11_0, S15_0, 0x20);
1125 tmp.packet[20] = _mm256_permute2f128_ps(S8_0, S12_0, 0x31);
1126 tmp.packet[21] = _mm256_permute2f128_ps(S9_0, S13_0, 0x31);
1127 tmp.packet[22] = _mm256_permute2f128_ps(S10_0, S14_0, 0x31);
1128 tmp.packet[23] = _mm256_permute2f128_ps(S11_0, S15_0, 0x31);
1129
1130 tmp.packet[24] = _mm256_permute2f128_ps(S8_1, S12_1, 0x20);
1131 tmp.packet[25] = _mm256_permute2f128_ps(S9_1, S13_1, 0x20);
1132 tmp.packet[26] = _mm256_permute2f128_ps(S10_1, S14_1, 0x20);
1133 tmp.packet[27] = _mm256_permute2f128_ps(S11_1, S15_1, 0x20);
1134 tmp.packet[28] = _mm256_permute2f128_ps(S8_1, S12_1, 0x31);
1135 tmp.packet[29] = _mm256_permute2f128_ps(S9_1, S13_1, 0x31);
1136 tmp.packet[30] = _mm256_permute2f128_ps(S10_1, S14_1, 0x31);
1137 tmp.packet[31] = _mm256_permute2f128_ps(S11_1, S15_1, 0x31);
1138
1139 // Pack them into the output
1140 PACK_OUTPUT(kernel.packet, tmp.packet, 0, 16);
1141 PACK_OUTPUT(kernel.packet, tmp.packet, 1, 16);
1142 PACK_OUTPUT(kernel.packet, tmp.packet, 2, 16);
1143 PACK_OUTPUT(kernel.packet, tmp.packet, 3, 16);
1144
1145 PACK_OUTPUT(kernel.packet, tmp.packet, 4, 16);
1146 PACK_OUTPUT(kernel.packet, tmp.packet, 5, 16);
1147 PACK_OUTPUT(kernel.packet, tmp.packet, 6, 16);
1148 PACK_OUTPUT(kernel.packet, tmp.packet, 7, 16);
1149
1150 PACK_OUTPUT(kernel.packet, tmp.packet, 8, 16);
1151 PACK_OUTPUT(kernel.packet, tmp.packet, 9, 16);
1152 PACK_OUTPUT(kernel.packet, tmp.packet, 10, 16);
1153 PACK_OUTPUT(kernel.packet, tmp.packet, 11, 16);
1154
1155 PACK_OUTPUT(kernel.packet, tmp.packet, 12, 16);
1156 PACK_OUTPUT(kernel.packet, tmp.packet, 13, 16);
1157 PACK_OUTPUT(kernel.packet, tmp.packet, 14, 16);
1158 PACK_OUTPUT(kernel.packet, tmp.packet, 15, 16);
1159}
#define PACK_OUTPUT(OUTPUT, INPUT, INDEX, STRIDE)
Definition PacketMath.h:1046

References EIGEN_EXTRACT_8f_FROM_16f, PACK_OUTPUT, and Eigen::internal::PacketBlock< Packet, N >::packet.

◆ ptranspose() [3/15]

EIGEN_DEVICE_FUNC void Eigen::internal::ptranspose ( PacketBlock< Packet16f, 4 > &  kernel)
inline
1164 {
1165 __m512 T0 = _mm512_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
1166 __m512 T1 = _mm512_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
1167 __m512 T2 = _mm512_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
1168 __m512 T3 = _mm512_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
1169
1170 __m512 S0 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(1, 0, 1, 0));
1171 __m512 S1 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(3, 2, 3, 2));
1172 __m512 S2 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(1, 0, 1, 0));
1173 __m512 S3 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(3, 2, 3, 2));
1174
1179
1181
1182 tmp.packet[0] = _mm256_permute2f128_ps(S0_0, S1_0, 0x20);
1183 tmp.packet[1] = _mm256_permute2f128_ps(S2_0, S3_0, 0x20);
1184 tmp.packet[2] = _mm256_permute2f128_ps(S0_0, S1_0, 0x31);
1185 tmp.packet[3] = _mm256_permute2f128_ps(S2_0, S3_0, 0x31);
1186
1187 tmp.packet[4] = _mm256_permute2f128_ps(S0_1, S1_1, 0x20);
1188 tmp.packet[5] = _mm256_permute2f128_ps(S2_1, S3_1, 0x20);
1189 tmp.packet[6] = _mm256_permute2f128_ps(S0_1, S1_1, 0x31);
1190 tmp.packet[7] = _mm256_permute2f128_ps(S2_1, S3_1, 0x31);
1191
1192 PACK_OUTPUT_2(kernel.packet, tmp.packet, 0, 1);
1193 PACK_OUTPUT_2(kernel.packet, tmp.packet, 1, 1);
1194 PACK_OUTPUT_2(kernel.packet, tmp.packet, 2, 1);
1195 PACK_OUTPUT_2(kernel.packet, tmp.packet, 3, 1);
1196}
#define PACK_OUTPUT_2(OUTPUT, INPUT, INDEX, STRIDE)
Definition PacketMath.h:1160

References EIGEN_EXTRACT_8f_FROM_16f, PACK_OUTPUT_2, and Eigen::internal::PacketBlock< Packet, N >::packet.

◆ ptranspose() [4/15]

EIGEN_STRONG_INLINE void Eigen::internal::ptranspose ( PacketBlock< Packet1cd, 2 > &  kernel)
373{
374 Packet2d tmp = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_HI);
375 kernel.packet[1].v = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_LO);
376 kernel.packet[0].v = tmp;
377}

References p16uc_TRANSPOSE64_HI, p16uc_TRANSPOSE64_LO, and Eigen::internal::PacketBlock< Packet, N >::packet.

◆ ptranspose() [5/15]

EIGEN_DEVICE_FUNC void Eigen::internal::ptranspose ( PacketBlock< Packet2cd, 2 > &  kernel)
inline
421 {
422 __m256d tmp = _mm256_permute2f128_pd(kernel.packet[0].v, kernel.packet[1].v, 0+(2<<4));
423 kernel.packet[1].v = _mm256_permute2f128_pd(kernel.packet[0].v, kernel.packet[1].v, 1+(3<<4));
424 kernel.packet[0].v = tmp;
425}

References Eigen::internal::PacketBlock< Packet, N >::packet.

◆ ptranspose() [6/15]

EIGEN_STRONG_INLINE void Eigen::internal::ptranspose ( PacketBlock< Packet2cf, 2 > &  kernel)
inline
243{
244 Packet4f tmp = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_HI);
245 kernel.packet[1].v = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_LO);
246 kernel.packet[0].v = tmp;
247}

References p16uc_TRANSPOSE64_HI, p16uc_TRANSPOSE64_LO, and Eigen::internal::PacketBlock< Packet, N >::packet.

Referenced by Eigen::internal::gemm_pack_lhs< Scalar, Index, DataMapper, Pack1, Pack2, RowMajor, Conjugate, PanelMode >::operator()(), Eigen::internal::gemm_pack_rhs< Scalar, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode >::operator()(), preduxp< Packet2cf >(), preduxp< Packet4f >(), and Eigen::internal::inplace_transpose_selector< MatrixType, true, true >::run().

+ Here is the caller graph for this function:

◆ ptranspose() [7/15]

EIGEN_DEVICE_FUNC void Eigen::internal::ptranspose ( PacketBlock< Packet2d, 2 > &  kernel)
inline
783 {
784 __m128d tmp = _mm_unpackhi_pd(kernel.packet[0], kernel.packet[1]);
785 kernel.packet[0] = _mm_unpacklo_pd(kernel.packet[0], kernel.packet[1]);
786 kernel.packet[1] = tmp;
787}

References Eigen::internal::PacketBlock< Packet, N >::packet.

◆ ptranspose() [8/15]

EIGEN_DEVICE_FUNC void Eigen::internal::ptranspose ( PacketBlock< Packet4cf, 4 > &  kernel)
inline
403 {
404 __m256d P0 = _mm256_castps_pd(kernel.packet[0].v);
405 __m256d P1 = _mm256_castps_pd(kernel.packet[1].v);
406 __m256d P2 = _mm256_castps_pd(kernel.packet[2].v);
407 __m256d P3 = _mm256_castps_pd(kernel.packet[3].v);
408
409 __m256d T0 = _mm256_shuffle_pd(P0, P1, 15);
410 __m256d T1 = _mm256_shuffle_pd(P0, P1, 0);
411 __m256d T2 = _mm256_shuffle_pd(P2, P3, 15);
412 __m256d T3 = _mm256_shuffle_pd(P2, P3, 0);
413
414 kernel.packet[1].v = _mm256_castpd_ps(_mm256_permute2f128_pd(T0, T2, 32));
415 kernel.packet[3].v = _mm256_castpd_ps(_mm256_permute2f128_pd(T0, T2, 49));
416 kernel.packet[0].v = _mm256_castpd_ps(_mm256_permute2f128_pd(T1, T3, 32));
417 kernel.packet[2].v = _mm256_castpd_ps(_mm256_permute2f128_pd(T1, T3, 49));
418}

References Eigen::internal::PacketBlock< Packet, N >::packet.

◆ ptranspose() [9/15]

EIGEN_DEVICE_FUNC void Eigen::internal::ptranspose ( PacketBlock< Packet4d, 4 > &  kernel)
inline
588 {
589 __m256d T0 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 15);
590 __m256d T1 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 0);
591 __m256d T2 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 15);
592 __m256d T3 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 0);
593
594 kernel.packet[1] = _mm256_permute2f128_pd(T0, T2, 32);
595 kernel.packet[3] = _mm256_permute2f128_pd(T0, T2, 49);
596 kernel.packet[0] = _mm256_permute2f128_pd(T1, T3, 32);
597 kernel.packet[2] = _mm256_permute2f128_pd(T1, T3, 49);
598}

References Eigen::internal::PacketBlock< Packet, N >::packet.

◆ ptranspose() [10/15]

EIGEN_DEVICE_FUNC void Eigen::internal::ptranspose ( PacketBlock< Packet4f, 4 > &  kernel)
inline
734 {
735 Packet4f t0, t1, t2, t3;
736 t0 = vec_mergeh(kernel.packet[0], kernel.packet[2]);
737 t1 = vec_mergel(kernel.packet[0], kernel.packet[2]);
738 t2 = vec_mergeh(kernel.packet[1], kernel.packet[3]);
739 t3 = vec_mergel(kernel.packet[1], kernel.packet[3]);
740 kernel.packet[0] = vec_mergeh(t0, t2);
741 kernel.packet[1] = vec_mergel(t0, t2);
742 kernel.packet[2] = vec_mergeh(t1, t3);
743 kernel.packet[3] = vec_mergel(t1, t3);
744}

References Eigen::internal::PacketBlock< Packet, N >::packet.

◆ ptranspose() [11/15]

EIGEN_DEVICE_FUNC void Eigen::internal::ptranspose ( PacketBlock< Packet4i, 4 > &  kernel)
inline
747 {
748 Packet4i t0, t1, t2, t3;
749 t0 = vec_mergeh(kernel.packet[0], kernel.packet[2]);
750 t1 = vec_mergel(kernel.packet[0], kernel.packet[2]);
751 t2 = vec_mergeh(kernel.packet[1], kernel.packet[3]);
752 t3 = vec_mergel(kernel.packet[1], kernel.packet[3]);
753 kernel.packet[0] = vec_mergeh(t0, t2);
754 kernel.packet[1] = vec_mergel(t0, t2);
755 kernel.packet[2] = vec_mergeh(t1, t3);
756 kernel.packet[3] = vec_mergel(t1, t3);
757}

References Eigen::internal::PacketBlock< Packet, N >::packet.

◆ ptranspose() [12/15]

EIGEN_DEVICE_FUNC void Eigen::internal::ptranspose ( PacketBlock< Packet8d, 4 > &  kernel)
inline
1207 {
1208 __m512d T0 = _mm512_shuffle_pd(kernel.packet[0], kernel.packet[1], 0);
1209 __m512d T1 = _mm512_shuffle_pd(kernel.packet[0], kernel.packet[1], 0xff);
1210 __m512d T2 = _mm512_shuffle_pd(kernel.packet[2], kernel.packet[3], 0);
1211 __m512d T3 = _mm512_shuffle_pd(kernel.packet[2], kernel.packet[3], 0xff);
1212
1214
1215 tmp.packet[0] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0),
1216 _mm512_extractf64x4_pd(T2, 0), 0x20);
1217 tmp.packet[1] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0),
1218 _mm512_extractf64x4_pd(T3, 0), 0x20);
1219 tmp.packet[2] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0),
1220 _mm512_extractf64x4_pd(T2, 0), 0x31);
1221 tmp.packet[3] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0),
1222 _mm512_extractf64x4_pd(T3, 0), 0x31);
1223
1224 tmp.packet[4] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1),
1225 _mm512_extractf64x4_pd(T2, 1), 0x20);
1226 tmp.packet[5] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1),
1227 _mm512_extractf64x4_pd(T3, 1), 0x20);
1228 tmp.packet[6] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1),
1229 _mm512_extractf64x4_pd(T2, 1), 0x31);
1230 tmp.packet[7] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1),
1231 _mm512_extractf64x4_pd(T3, 1), 0x31);
1232
1233 PACK_OUTPUT_D(kernel.packet, tmp.packet, 0, 1);
1234 PACK_OUTPUT_D(kernel.packet, tmp.packet, 1, 1);
1235 PACK_OUTPUT_D(kernel.packet, tmp.packet, 2, 1);
1236 PACK_OUTPUT_D(kernel.packet, tmp.packet, 3, 1);
1237}
#define PACK_OUTPUT_D(OUTPUT, INPUT, INDEX, STRIDE)
Definition PacketMath.h:1202

References PACK_OUTPUT_D, and Eigen::internal::PacketBlock< Packet, N >::packet.

◆ ptranspose() [13/15]

EIGEN_DEVICE_FUNC void Eigen::internal::ptranspose ( PacketBlock< Packet8d, 8 > &  kernel)
inline
1239 {
1240 __m512d T0 = _mm512_unpacklo_pd(kernel.packet[0], kernel.packet[1]);
1241 __m512d T1 = _mm512_unpackhi_pd(kernel.packet[0], kernel.packet[1]);
1242 __m512d T2 = _mm512_unpacklo_pd(kernel.packet[2], kernel.packet[3]);
1243 __m512d T3 = _mm512_unpackhi_pd(kernel.packet[2], kernel.packet[3]);
1244 __m512d T4 = _mm512_unpacklo_pd(kernel.packet[4], kernel.packet[5]);
1245 __m512d T5 = _mm512_unpackhi_pd(kernel.packet[4], kernel.packet[5]);
1246 __m512d T6 = _mm512_unpacklo_pd(kernel.packet[6], kernel.packet[7]);
1247 __m512d T7 = _mm512_unpackhi_pd(kernel.packet[6], kernel.packet[7]);
1248
1250
1251 tmp.packet[0] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0),
1252 _mm512_extractf64x4_pd(T2, 0), 0x20);
1253 tmp.packet[1] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0),
1254 _mm512_extractf64x4_pd(T3, 0), 0x20);
1255 tmp.packet[2] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0),
1256 _mm512_extractf64x4_pd(T2, 0), 0x31);
1257 tmp.packet[3] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0),
1258 _mm512_extractf64x4_pd(T3, 0), 0x31);
1259
1260 tmp.packet[4] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1),
1261 _mm512_extractf64x4_pd(T2, 1), 0x20);
1262 tmp.packet[5] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1),
1263 _mm512_extractf64x4_pd(T3, 1), 0x20);
1264 tmp.packet[6] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1),
1265 _mm512_extractf64x4_pd(T2, 1), 0x31);
1266 tmp.packet[7] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1),
1267 _mm512_extractf64x4_pd(T3, 1), 0x31);
1268
1269 tmp.packet[8] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T4, 0),
1270 _mm512_extractf64x4_pd(T6, 0), 0x20);
1271 tmp.packet[9] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T5, 0),
1272 _mm512_extractf64x4_pd(T7, 0), 0x20);
1273 tmp.packet[10] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T4, 0),
1274 _mm512_extractf64x4_pd(T6, 0), 0x31);
1275 tmp.packet[11] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T5, 0),
1276 _mm512_extractf64x4_pd(T7, 0), 0x31);
1277
1278 tmp.packet[12] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T4, 1),
1279 _mm512_extractf64x4_pd(T6, 1), 0x20);
1280 tmp.packet[13] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T5, 1),
1281 _mm512_extractf64x4_pd(T7, 1), 0x20);
1282 tmp.packet[14] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T4, 1),
1283 _mm512_extractf64x4_pd(T6, 1), 0x31);
1284 tmp.packet[15] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T5, 1),
1285 _mm512_extractf64x4_pd(T7, 1), 0x31);
1286
1287 PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 0, 8);
1288 PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 1, 8);
1289 PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 2, 8);
1290 PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 3, 8);
1291
1292 PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 4, 8);
1293 PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 5, 8);
1294 PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 6, 8);
1295 PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 7, 8);
1296}
#define PACK_OUTPUT_SQ_D(OUTPUT, INPUT, INDEX, STRIDE)
Definition PacketMath.h:1198

References PACK_OUTPUT_SQ_D, and Eigen::internal::PacketBlock< Packet, N >::packet.

◆ ptranspose() [14/15]

EIGEN_DEVICE_FUNC void Eigen::internal::ptranspose ( PacketBlock< Packet8f, 4 > &  kernel)
inline
570 {
571 __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
572 __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
573 __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
574 __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
575
576 __m256 S0 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(1,0,1,0));
577 __m256 S1 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(3,2,3,2));
578 __m256 S2 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(1,0,1,0));
579 __m256 S3 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(3,2,3,2));
580
581 kernel.packet[0] = _mm256_permute2f128_ps(S0, S1, 0x20);
582 kernel.packet[1] = _mm256_permute2f128_ps(S2, S3, 0x20);
583 kernel.packet[2] = _mm256_permute2f128_ps(S0, S1, 0x31);
584 kernel.packet[3] = _mm256_permute2f128_ps(S2, S3, 0x31);
585}

References Eigen::internal::PacketBlock< Packet, N >::packet.

◆ ptranspose() [15/15]

EIGEN_DEVICE_FUNC void Eigen::internal::ptranspose ( PacketBlock< Packet8f, 8 > &  kernel)
inline
542 {
543 __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
544 __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
545 __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
546 __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
547 __m256 T4 = _mm256_unpacklo_ps(kernel.packet[4], kernel.packet[5]);
548 __m256 T5 = _mm256_unpackhi_ps(kernel.packet[4], kernel.packet[5]);
549 __m256 T6 = _mm256_unpacklo_ps(kernel.packet[6], kernel.packet[7]);
550 __m256 T7 = _mm256_unpackhi_ps(kernel.packet[6], kernel.packet[7]);
551 __m256 S0 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(1,0,1,0));
552 __m256 S1 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(3,2,3,2));
553 __m256 S2 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(1,0,1,0));
554 __m256 S3 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(3,2,3,2));
555 __m256 S4 = _mm256_shuffle_ps(T4,T6,_MM_SHUFFLE(1,0,1,0));
556 __m256 S5 = _mm256_shuffle_ps(T4,T6,_MM_SHUFFLE(3,2,3,2));
557 __m256 S6 = _mm256_shuffle_ps(T5,T7,_MM_SHUFFLE(1,0,1,0));
558 __m256 S7 = _mm256_shuffle_ps(T5,T7,_MM_SHUFFLE(3,2,3,2));
559 kernel.packet[0] = _mm256_permute2f128_ps(S0, S4, 0x20);
560 kernel.packet[1] = _mm256_permute2f128_ps(S1, S5, 0x20);
561 kernel.packet[2] = _mm256_permute2f128_ps(S2, S6, 0x20);
562 kernel.packet[3] = _mm256_permute2f128_ps(S3, S7, 0x20);
563 kernel.packet[4] = _mm256_permute2f128_ps(S0, S4, 0x31);
564 kernel.packet[5] = _mm256_permute2f128_ps(S1, S5, 0x31);
565 kernel.packet[6] = _mm256_permute2f128_ps(S2, S6, 0x31);
566 kernel.packet[7] = _mm256_permute2f128_ps(S3, S7, 0x31);
567}

References Eigen::internal::PacketBlock< Packet, N >::packet.

◆ punpackp()

EIGEN_STRONG_INLINE void Eigen::internal::punpackp ( Packet4f vecs)
501{
502 vecs[1] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x55));
503 vecs[2] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xAA));
504 vecs[3] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xFF));
505 vecs[0] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x00));
506}

◆ putMarketHeader()

template<typename Scalar >
void Eigen::internal::putMarketHeader ( std::string &  header,
int  sym 
)
inline
67 {
68 header= "%%MatrixMarket matrix coordinate ";
69 if(internal::is_same<Scalar, std::complex<float> >::value || internal::is_same<Scalar, std::complex<double> >::value)
70 {
71 header += " complex";
72 if(sym == Symmetric) header += " symmetric";
73 else if (sym == SelfAdjoint) header += " Hermitian";
74 else header += " general";
75 }
76 else
77 {
78 header += " real";
79 if(sym == Symmetric) header += " symmetric";
80 else header += " general";
81 }
82 }
Definition Meta.h:63

References Eigen::SelfAdjoint, and Eigen::Symmetric.

◆ PutMatrixElt() [1/2]

template<typename Scalar >
void Eigen::internal::PutMatrixElt ( Scalar  value,
int  row,
int  col,
std::ofstream &  out 
)
inline
86 {
87 out << row << " "<< col << " " << value << "\n";
88 }

References col(), and row().

Referenced by Eigen::saveMarket().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ PutMatrixElt() [2/2]

template<typename Scalar >
void Eigen::internal::PutMatrixElt ( std::complex< Scalar >  value,
int  row,
int  col,
std::ofstream &  out 
)
inline
91 {
92 out << row << " " << col << " " << value.real() << " " << value.imag() << "\n";
93 }

References col(), and row().

+ Here is the call graph for this function:

◆ putVectorElt() [1/2]

template<typename Scalar >
void Eigen::internal::putVectorElt ( Scalar  value,
std::ofstream &  out 
)
inline
98 {
99 out << value << "\n";
100 }

Referenced by Eigen::saveMarketVector().

+ Here is the caller graph for this function:

◆ putVectorElt() [2/2]

template<typename Scalar >
void Eigen::internal::putVectorElt ( std::complex< Scalar >  value,
std::ofstream &  out 
)
inline
103 {
104 out << value.real() << " " << value.imag() << "\n";
105 }

◆ pxor()

template<typename Packet >
EIGEN_DEVICE_FUNC Packet Eigen::internal::pxor ( const Packet a,
const Packet b 
)
inline
206{ return a ^ b; }

Referenced by pmul< Packet2cf >(), and Eigen::internal::quat_product< Architecture::SSE, Derived, OtherDerived, double >::run().

+ Here is the caller graph for this function:

◆ pxor< Packet16f >()

344 {
345#ifdef EIGEN_VECTORIZE_AVX512DQ
346 return _mm512_xor_ps(a, b);
347#else
348 Packet16f res = _mm512_undefined_ps();
349 Packet4f lane0_a = _mm512_extractf32x4_ps(a, 0);
350 Packet4f lane0_b = _mm512_extractf32x4_ps(b, 0);
351 res = _mm512_insertf32x4(res, _mm_xor_ps(lane0_a, lane0_b), 0);
352
353 Packet4f lane1_a = _mm512_extractf32x4_ps(a, 1);
354 Packet4f lane1_b = _mm512_extractf32x4_ps(b, 1);
355 res = _mm512_insertf32x4(res, _mm_xor_ps(lane1_a, lane1_b), 1);
356
357 Packet4f lane2_a = _mm512_extractf32x4_ps(a, 2);
358 Packet4f lane2_b = _mm512_extractf32x4_ps(b, 2);
359 res = _mm512_insertf32x4(res, _mm_xor_ps(lane2_a, lane2_b), 2);
360
361 Packet4f lane3_a = _mm512_extractf32x4_ps(a, 3);
362 Packet4f lane3_b = _mm512_extractf32x4_ps(b, 3);
363 res = _mm512_insertf32x4(res, _mm_xor_ps(lane3_a, lane3_b), 3);
364
365 return res;
366#endif
367}

◆ pxor< Packet1cd >() [1/2]

310{ return Packet1cd(_mm_xor_pd(a.v,b.v)); }

◆ pxor< Packet1cd >() [2/2]

178{ return Packet1cd(vec_xor(a.v,b.v)); }

◆ pxor< Packet2cd >()

277{ return Packet2cd(_mm256_xor_pd(a.v,b.v)); }

◆ pxor< Packet2cf >() [1/4]

124{ return Packet2cf(pxor<Packet4f>(a.v, b.v)); }

References pxor< Packet4f >().

+ Here is the call graph for this function:

◆ pxor< Packet2cf >() [2/4]

113{
114 return Packet2cf(vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v))));
115}

◆ pxor< Packet2cf >() [3/4]

87{ return Packet2cf(_mm_xor_ps(a.v,b.v)); }

◆ pxor< Packet2cf >() [4/4]

179{ return Packet2cf(pxor<Packet4f>(a.v,b.v)); }

References pxor< Packet4f >().

+ Here is the call graph for this function:

◆ pxor< Packet2d >() [1/2]

299{ return _mm_xor_pd(a,b); }

◆ pxor< Packet2d >() [2/2]

607{ return vec_xor(a, b); }

◆ pxor< Packet4cf >()

72{ return Packet4cf(_mm256_xor_ps(a.v,b.v)); }

◆ pxor< Packet4d >()

209{ return _mm256_xor_pd(a,b); }

◆ pxor< Packet4f >() [1/4]

421{ return vec_xor(a, b); }

Referenced by pconj(), and pxor< Packet2cf >().

+ Here is the caller graph for this function:

◆ pxor< Packet4f >() [2/4]

266{
267 return vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
268}

◆ pxor< Packet4f >() [3/4]

298{ return _mm_xor_ps(a,b); }

◆ pxor< Packet4f >() [4/4]

609{
610 Packet4f res;
611 res.v4f[0] = pxor(a.v4f[0], b.v4f[0]);
612 res.v4f[1] = pxor(a.v4f[1], b.v4f[1]);
613 return res;
614}

References pxor(), and Eigen::internal::Packet4f::v4f.

+ Here is the call graph for this function:

◆ pxor< Packet4i >() [1/4]

422{ return vec_xor(a, b); }

◆ pxor< Packet4i >() [2/4]

269{ return veorq_s32(a,b); }

◆ pxor< Packet4i >() [3/4]

300{ return _mm_xor_si128(a,b); }

◆ pxor< Packet4i >() [4/4]

606{ return vec_xor(a, b); }

◆ pxor< Packet8d >()

370 {
371#ifdef EIGEN_VECTORIZE_AVX512DQ
372 return _mm512_xor_pd(a, b);
373#else
374 Packet8d res = _mm512_undefined_pd();
375 Packet4d lane0_a = _mm512_extractf64x4_pd(a, 0);
376 Packet4d lane0_b = _mm512_extractf64x4_pd(b, 0);
377 res = _mm512_insertf64x4(res, _mm256_xor_pd(lane0_a, lane0_b), 0);
378
379 Packet4d lane1_a = _mm512_extractf64x4_pd(a, 1);
380 Packet4d lane1_b = _mm512_extractf64x4_pd(b, 1);
381 res = _mm512_insertf64x4(res, _mm256_xor_pd(lane1_a, lane1_b), 1);
382
383 return res;
384#endif
385}

◆ pxor< Packet8f >()

208{ return _mm256_xor_ps(a,b); }

◆ queryCacheSizes()

void Eigen::internal::queryCacheSizes ( int &  l1,
int &  l2,
int &  l3 
)
inline
937{
938 #ifdef EIGEN_CPUID
939 int abcd[4];
940 const int GenuineIntel[] = {0x756e6547, 0x49656e69, 0x6c65746e};
941 const int AuthenticAMD[] = {0x68747541, 0x69746e65, 0x444d4163};
942 const int AMDisbetter_[] = {0x69444d41, 0x74656273, 0x21726574}; // "AMDisbetter!"
943
944 // identify the CPU vendor
945 EIGEN_CPUID(abcd,0x0,0);
946 int max_std_funcs = abcd[1];
947 if(cpuid_is_vendor(abcd,GenuineIntel))
948 queryCacheSizes_intel(l1,l2,l3,max_std_funcs);
949 else if(cpuid_is_vendor(abcd,AuthenticAMD) || cpuid_is_vendor(abcd,AMDisbetter_))
950 queryCacheSizes_amd(l1,l2,l3);
951 else
952 // by default let's use Intel's API
953 queryCacheSizes_intel(l1,l2,l3,max_std_funcs);
954
955 // here is the list of other vendors:
956// ||cpuid_is_vendor(abcd,"VIA VIA VIA ")
957// ||cpuid_is_vendor(abcd,"CyrixInstead")
958// ||cpuid_is_vendor(abcd,"CentaurHauls")
959// ||cpuid_is_vendor(abcd,"GenuineTMx86")
960// ||cpuid_is_vendor(abcd,"TransmetaCPU")
961// ||cpuid_is_vendor(abcd,"RiseRiseRise")
962// ||cpuid_is_vendor(abcd,"Geode by NSC")
963// ||cpuid_is_vendor(abcd,"SiS SiS SiS ")
964// ||cpuid_is_vendor(abcd,"UMC UMC UMC ")
965// ||cpuid_is_vendor(abcd,"NexGenDriven")
966 #else
967 l1 = l2 = l3 = -1;
968 #endif
969}

Referenced by Eigen::internal::CacheSizes::CacheSizes(), queryL1CacheSize(), and queryTopLevelCacheSize().

+ Here is the caller graph for this function:

◆ queryL1CacheSize()

int Eigen::internal::queryL1CacheSize ( )
inline
974{
975 int l1(-1), l2, l3;
976 queryCacheSizes(l1,l2,l3);
977 return l1;
978}
void queryCacheSizes(int &l1, int &l2, int &l3)
Definition Memory.h:936

References queryCacheSizes().

+ Here is the call graph for this function:

◆ queryTopLevelCacheSize()

int Eigen::internal::queryTopLevelCacheSize ( )
inline
983{
984 int l1, l2(-1), l3(-1);
985 queryCacheSizes(l1,l2,l3);
986 return (std::max)(l2,l3);
987}

References queryCacheSizes().

+ Here is the call graph for this function:

◆ QuickSplit()

template<typename VectorV , typename VectorI >
Index Eigen::internal::QuickSplit ( VectorV &  row,
VectorI &  ind,
Index  ncut 
)
30{
31 typedef typename VectorV::RealScalar RealScalar;
32 using std::swap;
33 using std::abs;
34 Index mid;
35 Index n = row.size(); /* length of the vector */
36 Index first, last ;
37
38 ncut--; /* to fit the zero-based indices */
39 first = 0;
40 last = n-1;
41 if (ncut < first || ncut > last ) return 0;
42
43 do {
44 mid = first;
45 RealScalar abskey = abs(row(mid));
46 for (Index j = first + 1; j <= last; j++) {
47 if ( abs(row(j)) > abskey) {
48 ++mid;
49 swap(row(mid), row(j));
50 swap(ind(mid), ind(j));
51 }
52 }
53 /* Interchange for the pivot element */
54 swap(row(mid), row(first));
55 swap(ind(mid), ind(first));
56
57 if (mid > ncut) last = mid - 1;
58 else if (mid < ncut ) first = mid + 1;
59 } while (mid != ncut );
60
61 return 0; /* mid is equal to ncut */
62}
void swap(scoped_array< T > &a, scoped_array< T > &b)
Definition Memory.h:602

References row(), and swap().

Referenced by Eigen::IncompleteLUT< _Scalar, _StorageIndex >::factorize(), and Eigen::IncompleteCholesky< Scalar, _UpLo, _OrderingType >::factorize().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ rcond_estimate_helper()

template<typename Decomposition >
Decomposition::RealScalar Eigen::internal::rcond_estimate_helper ( typename Decomposition::RealScalar  matrix_norm,
const Decomposition &  dec 
)

Reciprocal condition number estimator.

Computing a decomposition of a dense matrix takes O(n^3) operations, while this method estimates the condition number quickly and reliably in O(n^2) operations.

Returns
an estimate of the reciprocal condition number (1 / (||matrix||_1 * ||inv(matrix)||_1)) of matrix, given ||matrix||_1 and its decomposition. Supports the following decompositions: FullPivLU, PartialPivLU, LDLT, and LLT.
See also
FullPivLU, PartialPivLU, LDLT, LLT.
160{
161 typedef typename Decomposition::RealScalar RealScalar;
162 eigen_assert(dec.rows() == dec.cols());
163 if (dec.rows() == 0) return NumTraits<RealScalar>::infinity();
164 if (matrix_norm == RealScalar(0)) return RealScalar(0);
165 if (dec.rows() == 1) return RealScalar(1);
166 const RealScalar inverse_matrix_norm = rcond_invmatrix_L1_norm_estimate(dec);
167 return (inverse_matrix_norm == RealScalar(0) ? RealScalar(0)
168 : (RealScalar(1) / inverse_matrix_norm) / matrix_norm);
169}
Decomposition::RealScalar rcond_invmatrix_L1_norm_estimate(const Decomposition &dec)
Definition ConditionEstimator.h:56

References eigen_assert, and rcond_invmatrix_L1_norm_estimate().

Referenced by Eigen::LDLT< _MatrixType, _UpLo >::rcond(), Eigen::LLT< _MatrixType, _UpLo >::rcond(), Eigen::FullPivLU< _MatrixType >::rcond(), and Eigen::PartialPivLU< _MatrixType >::rcond().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ rcond_invmatrix_L1_norm_estimate()

template<typename Decomposition >
Decomposition::RealScalar Eigen::internal::rcond_invmatrix_L1_norm_estimate ( const Decomposition &  dec)
Returns
an estimate of ||inv(matrix)||_1 given a decomposition of matrix that implements .solve() and .adjoint().solve() methods.

This function implements Algorithms 4.1 and 5.1 from http://www.maths.manchester.ac.uk/~higham/narep/narep135.pdf which also forms the basis for the condition number estimators in LAPACK. Since at most 10 calls to the solve method of dec are performed, the total cost is O(dims^2), as opposed to O(dims^3) needed to compute the inverse matrix explicitly.

The most common usage is in estimating the condition number ||matrix||_1 * ||inv(matrix)||_1. The first term ||matrix||_1 can be computed directly in O(n^2) operations.

Supports the following decompositions: FullPivLU, PartialPivLU, LDLT, and LLT.

See also
FullPivLU, PartialPivLU, LDLT, LLT.
57{
58 typedef typename Decomposition::MatrixType MatrixType;
59 typedef typename Decomposition::Scalar Scalar;
60 typedef typename Decomposition::RealScalar RealScalar;
61 typedef typename internal::plain_col_type<MatrixType>::type Vector;
63 const bool is_complex = (NumTraits<Scalar>::IsComplex != 0);
64
65 eigen_assert(dec.rows() == dec.cols());
66 const Index n = dec.rows();
67 if (n == 0)
68 return 0;
69
70 // Disable Index to float conversion warning
71#ifdef __INTEL_COMPILER
72 #pragma warning push
73 #pragma warning ( disable : 2259 )
74#endif
75 Vector v = dec.solve(Vector::Ones(n) / Scalar(n));
76#ifdef __INTEL_COMPILER
77 #pragma warning pop
78#endif
79
80 // lower_bound is a lower bound on
81 // ||inv(matrix)||_1 = sup_v ||inv(matrix) v||_1 / ||v||_1
82 // and is the objective maximized by the ("super-") gradient ascent
83 // algorithm below.
84 RealScalar lower_bound = v.template lpNorm<1>();
85 if (n == 1)
86 return lower_bound;
87
88 // Gradient ascent algorithm follows: We know that the optimum is achieved at
89 // one of the simplices v = e_i, so in each iteration we follow a
90 // super-gradient to move towards the optimal one.
91 RealScalar old_lower_bound = lower_bound;
92 Vector sign_vector(n);
93 Vector old_sign_vector;
94 Index v_max_abs_index = -1;
95 Index old_v_max_abs_index = v_max_abs_index;
96 for (int k = 0; k < 4; ++k)
97 {
99 if (k > 0 && !is_complex && sign_vector == old_sign_vector) {
100 // Break if the solution stagnated.
101 break;
102 }
103 // v_max_abs_index = argmax |real( inv(matrix)^T * sign_vector )|
104 v = dec.adjoint().solve(sign_vector);
105 v.real().cwiseAbs().maxCoeff(&v_max_abs_index);
106 if (v_max_abs_index == old_v_max_abs_index) {
107 // Break if the solution stagnated.
108 break;
109 }
110 // Move to the new simplex e_j, where j = v_max_abs_index.
111 v = dec.solve(Vector::Unit(n, v_max_abs_index)); // v = inv(matrix) * e_j.
112 lower_bound = v.template lpNorm<1>();
113 if (lower_bound <= old_lower_bound) {
114 // Break if the gradient step did not increase the lower_bound.
115 break;
116 }
117 if (!is_complex) {
118 old_sign_vector = sign_vector;
119 }
120 old_v_max_abs_index = v_max_abs_index;
121 old_lower_bound = lower_bound;
122 }
123 // The following calculates an independent estimate of ||matrix||_1 by
124 // multiplying matrix by a vector with entries of slowly increasing
125 // magnitude and alternating sign:
126 // v_i = (-1)^{i} (1 + (i / (dim-1))), i = 0,...,dim-1.
127 // This improvement to Hager's algorithm above is due to Higham. It was
128 // added to make the algorithm more robust in certain corner cases where
129 // large elements in the matrix might otherwise escape detection due to
130 // exact cancellation (especially when op and op_adjoint correspond to a
131 // sequence of backsubstitutions and permutations), which could cause
132 // Hager's algorithm to vastly underestimate ||matrix||_1.
133 Scalar alternating_sign(RealScalar(1));
134 for (Index i = 0; i < n; ++i) {
135 // The static_cast is needed when Scalar is a complex and RealScalar implements expression templates
136 v[i] = alternating_sign * static_cast<RealScalar>(RealScalar(1) + (RealScalar(i) / (RealScalar(n - 1))));
137 alternating_sign = -alternating_sign;
138 }
139 v = dec.solve(v);
140 const RealScalar alternate_lower_bound = (2 * v.template lpNorm<1>()) / (3 * RealScalar(n));
141 return numext::maxi(lower_bound, alternate_lower_bound);
142}
Definition ConditionEstimator.h:18

References eigen_assert, Eigen::numext::maxi(), and Eigen::internal::rcond_compute_sign< Vector, RealVector, IsComplex >::run().

Referenced by rcond_estimate_helper().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ real_2x2_jacobi_svd()

template<typename MatrixType , typename RealScalar , typename Index >
void Eigen::internal::real_2x2_jacobi_svd ( const MatrixType &  matrix,
Index  p,
Index  q,
JacobiRotation< RealScalar > *  j_left,
JacobiRotation< RealScalar > *  j_right 
)
22{
23 using std::sqrt;
24 using std::abs;
26 m << numext::real(matrix.coeff(p,p)), numext::real(matrix.coeff(p,q)),
27 numext::real(matrix.coeff(q,p)), numext::real(matrix.coeff(q,q));
29 RealScalar t = m.coeff(0,0) + m.coeff(1,1);
30 RealScalar d = m.coeff(1,0) - m.coeff(0,1);
31
32 if(abs(d) < (std::numeric_limits<RealScalar>::min)())
33 {
34 rot1.s() = RealScalar(0);
35 rot1.c() = RealScalar(1);
36 }
37 else
38 {
39 // If d!=0, then t/d cannot overflow because the magnitude of the
40 // entries forming d are not too small compared to the ones forming t.
41 RealScalar u = t / d;
42 RealScalar tmp = sqrt(RealScalar(1) + numext::abs2(u));
43 rot1.s() = RealScalar(1) / tmp;
44 rot1.c() = u / tmp;
45 }
46 m.applyOnTheLeft(0,1,rot1);
47 j_right->makeJacobi(m,0,1);
48 *j_left = rot1 * j_right->transpose();
49}
bool makeJacobi(const MatrixBase< Derived > &, Index p, Index q)
Definition Jacobi.h:126
JacobiRotation transpose() const
Definition Jacobi.h:59
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar & coeff(Index rowId, Index colId) const
Definition PlainObjectBase.h:160

References Eigen::JacobiRotation< Scalar >::c(), Eigen::PlainObjectBase< Derived >::coeff(), Eigen::JacobiRotation< Scalar >::makeJacobi(), Eigen::JacobiRotation< Scalar >::s(), sqrt(), and Eigen::JacobiRotation< Scalar >::transpose().

Referenced by Eigen::RealQZ< _MatrixType >::compute(), and Eigen::JacobiSVD< _MatrixType, QRPreconditioner >::compute().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ resize_if_allowed() [1/2]

template<typename DstXprType , typename SrcXprType , typename Functor >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Eigen::internal::resize_if_allowed ( DstXprType &  dst,
const SrcXprType &  src,
const Functor &   
)
707{
710 eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());
711}
#define EIGEN_ONLY_USED_FOR_DEBUG(x)
Definition Macros.h:591

References eigen_assert, and EIGEN_ONLY_USED_FOR_DEBUG.

Referenced by call_dense_assignment_loop(), and Eigen::internal::Assignment< DstXprType, SrcXprType, Functor, Sparse2Dense >::run().

+ Here is the caller graph for this function:

◆ resize_if_allowed() [2/2]

template<typename DstXprType , typename SrcXprType , typename T1 , typename T2 >
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Eigen::internal::resize_if_allowed ( DstXprType &  dst,
const SrcXprType &  src,
const internal::assign_op< T1, T2 > &   
)
716{
717 Index dstRows = src.rows();
718 Index dstCols = src.cols();
719 if(((dst.rows()!=dstRows) || (dst.cols()!=dstCols)))
720 dst.resize(dstRows, dstCols);
721 eigen_assert(dst.rows() == dstRows && dst.cols() == dstCols);
722}

References eigen_assert.

◆ return_ptr()

template<typename T >
const T * Eigen::internal::return_ptr ( )

◆ set_from_triplets()

template<typename InputIterator , typename SparseMatrixType , typename DupFunctor >
void Eigen::internal::set_from_triplets ( const InputIterator &  begin,
const InputIterator &  end,
SparseMatrixType &  mat,
DupFunctor  dup_func 
)
921{
922 enum { IsRowMajor = SparseMatrixType::IsRowMajor };
923 typedef typename SparseMatrixType::Scalar Scalar;
924 typedef typename SparseMatrixType::StorageIndex StorageIndex;
926
927 if(begin!=end)
928 {
929 // pass 1: count the nnz per inner-vector
930 typename SparseMatrixType::IndexVector wi(trMat.outerSize());
931 wi.setZero();
932 for(InputIterator it(begin); it!=end; ++it)
933 {
934 eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
935 wi(IsRowMajor ? it->col() : it->row())++;
936 }
937
938 // pass 2: insert all the elements into trMat
939 trMat.reserve(wi);
940 for(InputIterator it(begin); it!=end; ++it)
941 trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
942
943 // pass 3:
944 trMat.collapseDuplicates(dup_func);
945 }
946
947 // pass 4: transposed copy -> implicit sorting
948 mat = trMat;
949}

References Eigen::SparseMatrix< _Scalar, _Options, _StorageIndex >::collapseDuplicates(), eigen_assert, Eigen::SparseMatrix< _Scalar, _Options, _StorageIndex >::insertBackUncompressed(), Eigen::SparseMatrix< _Scalar, _Options, _StorageIndex >::outerSize(), Eigen::SparseMatrix< _Scalar, _Options, _StorageIndex >::reserve(), and set_from_triplets().

Referenced by set_from_triplets().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ smart_copy()

template<typename T >
EIGEN_DEVICE_FUNC void Eigen::internal::smart_copy ( const T *  start,
const T *  end,
T *  target 
)

◆ smart_memmove()

template<typename T >
void Eigen::internal::smart_memmove ( const T *  start,
const T *  end,
T *  target 
)
509{
511}

References Eigen::end().

Referenced by Eigen::internal::CompressedStorage< _Scalar, _StorageIndex >::atWithInsertion(), and Eigen::internal::sparse_matrix_block_impl< SparseMatrixType, BlockRows, BlockCols >::operator=().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ solve_sparse_through_dense_panels() [1/2]

template<typename Decomposition , typename Rhs , typename Dest >
enable_if< Rhs::ColsAtCompileTime!=1 &&Dest::ColsAtCompileTime!=1 >::type Eigen::internal::solve_sparse_through_dense_panels ( const Decomposition &  dec,
const Rhs &  rhs,
Dest &  dest 
)
24{
25 EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
26 typedef typename Dest::Scalar DestScalar;
27 // we process the sparse rhs per block of NbColsAtOnce columns temporarily stored into a dense matrix.
28 static const Index NbColsAtOnce = 4;
29 Index rhsCols = rhs.cols();
30 Index size = rhs.rows();
31 // the temporary matrices do not need more columns than NbColsAtOnce:
32 Index tmpCols = (std::min)(rhsCols, NbColsAtOnce);
35 for(Index k=0; k<rhsCols; k+=NbColsAtOnce)
36 {
37 Index actualCols = std::min<Index>(rhsCols-k, NbColsAtOnce);
38 tmp.leftCols(actualCols) = rhs.middleCols(k,actualCols);
39 tmpX.leftCols(actualCols) = dec.solve(tmp.leftCols(actualCols));
40 dest.middleCols(k,actualCols) = tmpX.leftCols(actualCols).sparseView();
41 }
42}

References EIGEN_STATIC_ASSERT, and Eigen::RowMajorBit.

Referenced by Eigen::SimplicialCholeskyBase< Derived >::_solve_impl(), Eigen::SimplicialCholesky< _MatrixType, _UpLo, _Ordering >::_solve_impl(), and Eigen::SparseSolverBase< Derived >::_solve_impl().

+ Here is the caller graph for this function:

◆ solve_sparse_through_dense_panels() [2/2]

template<typename Decomposition , typename Rhs , typename Dest >
enable_if< Rhs::ColsAtCompileTime==1||Dest::ColsAtCompileTime==1 >::type Eigen::internal::solve_sparse_through_dense_panels ( const Decomposition &  dec,
const Rhs &  rhs,
Dest &  dest 
)
48{
49 typedef typename Dest::Scalar DestScalar;
50 Index size = rhs.rows();
53 dest_dense = dec.solve(rhs_dense);
54 dest = dest_dense.sparseView();
55}

◆ sparse_selfadjoint_time_dense_product()

template<int Mode, typename SparseLhsType , typename DenseRhsType , typename DenseResType , typename AlphaType >
void Eigen::internal::sparse_selfadjoint_time_dense_product ( const SparseLhsType &  lhs,
const DenseRhsType &  rhs,
DenseResType &  res,
const AlphaType &  alpha 
)
inline
281{
283
285 typedef typename internal::remove_all<SparseLhsTypeNested>::type SparseLhsTypeNestedCleaned;
287 typedef typename LhsEval::InnerIterator LhsIterator;
288 typedef typename SparseLhsType::Scalar LhsScalar;
289
290 enum {
291 LhsIsRowMajor = (LhsEval::Flags&RowMajorBit)==RowMajorBit,
292 ProcessFirstHalf =
293 ((Mode&(Upper|Lower))==(Upper|Lower))
294 || ( (Mode&Upper) && !LhsIsRowMajor)
295 || ( (Mode&Lower) && LhsIsRowMajor),
296 ProcessSecondHalf = !ProcessFirstHalf
297 };
298
299 SparseLhsTypeNested lhs_nested(lhs);
300 LhsEval lhsEval(lhs_nested);
301
302 // work on one column at once
303 for (Index k=0; k<rhs.cols(); ++k)
304 {
305 for (Index j=0; j<lhs.outerSize(); ++j)
306 {
307 LhsIterator i(lhsEval,j);
308 // handle diagonal coeff
309 if (ProcessSecondHalf)
310 {
311 while (i && i.index()<j) ++i;
312 if(i && i.index()==j)
313 {
314 res.coeffRef(j,k) += alpha * i.value() * rhs.coeff(j,k);
315 ++i;
316 }
317 }
318
319 // premultiplied rhs for scatters
320 typename ScalarBinaryOpTraits<AlphaType, typename DenseRhsType::Scalar>::ReturnType rhs_j(alpha*rhs(j,k));
321 // accumulator for partial scalar product
322 typename DenseResType::Scalar res_j(0);
323 for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
324 {
325 LhsScalar lhs_ij = i.value();
326 if(!LhsIsRowMajor) lhs_ij = numext::conj(lhs_ij);
327 res_j += lhs_ij * rhs.coeff(i.index(),k);
328 res(i.index(),k) += numext::conj(lhs_ij) * rhs_j;
329 }
330 res.coeffRef(j,k) += alpha * res_j;
331
332 // handle diagonal coeff
333 if (ProcessFirstHalf && i && (i.index()==j))
334 res.coeffRef(j,k) += alpha * i.value() * rhs.coeff(j,k);
335 }
336 }
337}

References EIGEN_ONLY_USED_FOR_DEBUG, Eigen::Lower, Eigen::RowMajorBit, sparse_selfadjoint_time_dense_product(), and Eigen::Upper.

Referenced by sparse_selfadjoint_time_dense_product().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ sparse_sparse_product_with_pruning_impl()

template<typename Lhs , typename Rhs , typename ResultType >
static void Eigen::internal::sparse_sparse_product_with_pruning_impl ( const Lhs &  lhs,
const Rhs &  rhs,
ResultType &  res,
const typename ResultType::RealScalar &  tolerance 
)
static
21{
22 // return sparse_sparse_product_with_pruning_impl2(lhs,rhs,res);
23
24 typedef typename remove_all<Rhs>::type::Scalar RhsScalar;
25 typedef typename remove_all<ResultType>::type::Scalar ResScalar;
26 typedef typename remove_all<Lhs>::type::StorageIndex StorageIndex;
27
28 // make sure to call innerSize/outerSize since we fake the storage order.
29 Index rows = lhs.innerSize();
30 Index cols = rhs.outerSize();
31 //Index size = lhs.outerSize();
32 eigen_assert(lhs.outerSize() == rhs.innerSize());
33
34 // allocate a temporary buffer
36
37 // mimics a resizeByInnerOuter:
38 if(ResultType::IsRowMajor)
39 res.resize(cols, rows);
40 else
41 res.resize(rows, cols);
42
43 evaluator<Lhs> lhsEval(lhs);
44 evaluator<Rhs> rhsEval(rhs);
45
46 // estimate the number of non zero entries
 47 // given a rhs column containing Y non zeros, we assume that the respective Y columns
 48 // of the lhs differ on average by one non-zero, thus the number of non-zeros for
 49 // the product of a rhs column with the lhs is X+Y where X is the average number of non-zeros
50 // per column of the lhs.
51 // Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs)
52 Index estimated_nnz_prod = lhsEval.nonZerosEstimate() + rhsEval.nonZerosEstimate();
53
54 res.reserve(estimated_nnz_prod);
55 double ratioColRes = double(estimated_nnz_prod)/(double(lhs.rows())*double(rhs.cols()));
56 for (Index j=0; j<cols; ++j)
57 {
58 // FIXME:
59 //double ratioColRes = (double(rhs.innerVector(j).nonZeros()) + double(lhs.nonZeros())/double(lhs.cols()))/double(lhs.rows());
60 // let's do a more accurate determination of the nnz ratio for the current column j of res
61 tempVector.init(ratioColRes);
62 tempVector.setZero();
63 for (typename evaluator<Rhs>::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt)
64 {
65 // FIXME should be written like this: tmp += rhsIt.value() * lhs.col(rhsIt.index())
66 tempVector.restart();
67 RhsScalar x = rhsIt.value();
68 for (typename evaluator<Lhs>::InnerIterator lhsIt(lhsEval, rhsIt.index()); lhsIt; ++lhsIt)
69 {
70 tempVector.coeffRef(lhsIt.index()) += lhsIt.value() * x;
71 }
72 }
73 res.startVec(j);
74 for (typename AmbiVector<ResScalar,StorageIndex>::Iterator it(tempVector,tolerance); it; ++it)
75 res.insertBackByOuterInner(j,it.index()) = it.value();
76 }
77 res.finalize();
78}
Definition AmbiVector.h:24

References Eigen::internal::AmbiVector< _Scalar, _StorageIndex >::coeffRef(), eigen_assert, Eigen::internal::AmbiVector< _Scalar, _StorageIndex >::init(), Eigen::internal::AmbiVector< _Scalar, _StorageIndex >::restart(), and Eigen::internal::AmbiVector< _Scalar, _StorageIndex >::setZero().

+ Here is the call graph for this function:

◆ sparse_sparse_to_dense_product_impl()

template<typename Lhs , typename Rhs , typename ResultType >
static void Eigen::internal::sparse_sparse_to_dense_product_impl ( const Lhs &  lhs,
const Rhs &  rhs,
ResultType &  res 
)
static
271{
272 typedef typename remove_all<Lhs>::type::Scalar LhsScalar;
273 typedef typename remove_all<Rhs>::type::Scalar RhsScalar;
274 Index cols = rhs.outerSize();
275 eigen_assert(lhs.outerSize() == rhs.innerSize());
276
277 evaluator<Lhs> lhsEval(lhs);
278 evaluator<Rhs> rhsEval(rhs);
279
280 for (Index j=0; j<cols; ++j)
281 {
282 for (typename evaluator<Rhs>::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt)
283 {
284 RhsScalar y = rhsIt.value();
285 Index k = rhsIt.index();
286 for (typename evaluator<Lhs>::InnerIterator lhsIt(lhsEval, k); lhsIt; ++lhsIt)
287 {
288 Index i = lhsIt.index();
289 LhsScalar x = lhsIt.value();
290 res.coeffRef(i,j) += x * y;
291 }
292 }
293 }
294}

References eigen_assert, and y.

◆ sparse_time_dense_product()

template<typename SparseLhsType , typename DenseRhsType , typename DenseResType , typename AlphaType >
void Eigen::internal::sparse_time_dense_product ( const SparseLhsType &  lhs,
const DenseRhsType &  rhs,
DenseResType &  res,
const AlphaType &  alpha 
)
inline

◆ sparselu_gemm()

template<typename Scalar >
EIGEN_DONT_INLINE void Eigen::internal::sparselu_gemm ( Index  m,
Index  n,
Index  d,
const Scalar *  A,
Index  lda,
const Scalar *  B,
Index  ldb,
Scalar *  C,
Index  ldc 
)
27{
28 using namespace Eigen::internal;
29
30 typedef typename packet_traits<Scalar>::type Packet;
31 enum {
33 PacketSize = packet_traits<Scalar>::size,
34 PM = 8, // peeling in M
35 RN = 2, // register blocking
36 RK = NumberOfRegisters>=16 ? 4 : 2, // register blocking
37 BM = 4096/sizeof(Scalar), // number of rows of A-C per chunk
38 SM = PM*PacketSize // step along M
39 };
40 Index d_end = (d/RK)*RK; // number of columns of A (rows of B) suitable for full register blocking
41 Index n_end = (n/RN)*RN; // number of columns of B-C suitable for processing RN columns at once
42 Index i0 = internal::first_default_aligned(A,m);
43
44 eigen_internal_assert(((lda%PacketSize)==0) && ((ldc%PacketSize)==0) && (i0==internal::first_default_aligned(C,m)));
45
46 // handle the non aligned rows of A and C without any optimization:
47 for(Index i=0; i<i0; ++i)
48 {
49 for(Index j=0; j<n; ++j)
50 {
51 Scalar c = C[i+j*ldc];
52 for(Index k=0; k<d; ++k)
53 c += B[k+j*ldb] * A[i+k*lda];
54 C[i+j*ldc] = c;
55 }
56 }
57 // process the remaining rows per chunk of BM rows
58 for(Index ib=i0; ib<m; ib+=BM)
59 {
60 Index actual_b = std::min<Index>(BM, m-ib); // actual number of rows
61 Index actual_b_end1 = (actual_b/SM)*SM; // actual number of rows suitable for peeling
62 Index actual_b_end2 = (actual_b/PacketSize)*PacketSize; // actual number of rows suitable for vectorization
63
64 // Let's process two columns of B-C at once
65 for(Index j=0; j<n_end; j+=RN)
66 {
67 const Scalar* Bc0 = B+(j+0)*ldb;
68 const Scalar* Bc1 = B+(j+1)*ldb;
69
70 for(Index k=0; k<d_end; k+=RK)
71 {
72
73 // load and expand a RN x RK block of B
74 Packet b00, b10, b20, b30, b01, b11, b21, b31;
75 { b00 = pset1<Packet>(Bc0[0]); }
76 { b10 = pset1<Packet>(Bc0[1]); }
77 if(RK==4) { b20 = pset1<Packet>(Bc0[2]); }
78 if(RK==4) { b30 = pset1<Packet>(Bc0[3]); }
79 { b01 = pset1<Packet>(Bc1[0]); }
80 { b11 = pset1<Packet>(Bc1[1]); }
81 if(RK==4) { b21 = pset1<Packet>(Bc1[2]); }
82 if(RK==4) { b31 = pset1<Packet>(Bc1[3]); }
83
84 Packet a0, a1, a2, a3, c0, c1, t0, t1;
85
86 const Scalar* A0 = A+ib+(k+0)*lda;
87 const Scalar* A1 = A+ib+(k+1)*lda;
88 const Scalar* A2 = A+ib+(k+2)*lda;
89 const Scalar* A3 = A+ib+(k+3)*lda;
90
91 Scalar* C0 = C+ib+(j+0)*ldc;
92 Scalar* C1 = C+ib+(j+1)*ldc;
93
94 a0 = pload<Packet>(A0);
95 a1 = pload<Packet>(A1);
96 if(RK==4)
97 {
98 a2 = pload<Packet>(A2);
99 a3 = pload<Packet>(A3);
100 }
101 else
102 {
103 // workaround "may be used uninitialized in this function" warning
104 a2 = a3 = a0;
105 }
106
107#define KMADD(c, a, b, tmp) {tmp = b; tmp = pmul(a,tmp); c = padd(c,tmp);}
108#define WORK(I) \
109 c0 = pload<Packet>(C0+i+(I)*PacketSize); \
110 c1 = pload<Packet>(C1+i+(I)*PacketSize); \
111 KMADD(c0, a0, b00, t0) \
112 KMADD(c1, a0, b01, t1) \
113 a0 = pload<Packet>(A0+i+(I+1)*PacketSize); \
114 KMADD(c0, a1, b10, t0) \
115 KMADD(c1, a1, b11, t1) \
116 a1 = pload<Packet>(A1+i+(I+1)*PacketSize); \
117 if(RK==4){ KMADD(c0, a2, b20, t0) }\
118 if(RK==4){ KMADD(c1, a2, b21, t1) }\
119 if(RK==4){ a2 = pload<Packet>(A2+i+(I+1)*PacketSize); }\
120 if(RK==4){ KMADD(c0, a3, b30, t0) }\
121 if(RK==4){ KMADD(c1, a3, b31, t1) }\
122 if(RK==4){ a3 = pload<Packet>(A3+i+(I+1)*PacketSize); }\
123 pstore(C0+i+(I)*PacketSize, c0); \
124 pstore(C1+i+(I)*PacketSize, c1)
125
126 // process rows of A' - C' with aggressive vectorization and peeling
127 for(Index i=0; i<actual_b_end1; i+=PacketSize*8)
128 {
129 EIGEN_ASM_COMMENT("SPARSELU_GEMML_KERNEL1");
130 prefetch((A0+i+(5)*PacketSize));
131 prefetch((A1+i+(5)*PacketSize));
132 if(RK==4) prefetch((A2+i+(5)*PacketSize));
133 if(RK==4) prefetch((A3+i+(5)*PacketSize));
134
135 WORK(0);
136 WORK(1);
137 WORK(2);
138 WORK(3);
139 WORK(4);
140 WORK(5);
141 WORK(6);
142 WORK(7);
143 }
144 // process the remaining rows with vectorization only
145 for(Index i=actual_b_end1; i<actual_b_end2; i+=PacketSize)
146 {
147 WORK(0);
148 }
149#undef WORK
150 // process the remaining rows without vectorization
151 for(Index i=actual_b_end2; i<actual_b; ++i)
152 {
153 if(RK==4)
154 {
155 C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1]+A2[i]*Bc0[2]+A3[i]*Bc0[3];
156 C1[i] += A0[i]*Bc1[0]+A1[i]*Bc1[1]+A2[i]*Bc1[2]+A3[i]*Bc1[3];
157 }
158 else
159 {
160 C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1];
161 C1[i] += A0[i]*Bc1[0]+A1[i]*Bc1[1];
162 }
163 }
164
165 Bc0 += RK;
166 Bc1 += RK;
167 } // peeled loop on k
168 } // peeled loop on the columns j
169 // process the last column (we now perform a matrix-vector product)
170 if((n-n_end)>0)
171 {
172 const Scalar* Bc0 = B+(n-1)*ldb;
173
174 for(Index k=0; k<d_end; k+=RK)
175 {
176
177 // load and expand a 1 x RK block of B
178 Packet b00, b10, b20, b30;
179 b00 = pset1<Packet>(Bc0[0]);
180 b10 = pset1<Packet>(Bc0[1]);
181 if(RK==4) b20 = pset1<Packet>(Bc0[2]);
182 if(RK==4) b30 = pset1<Packet>(Bc0[3]);
183
184 Packet a0, a1, a2, a3, c0, t0/*, t1*/;
185
186 const Scalar* A0 = A+ib+(k+0)*lda;
187 const Scalar* A1 = A+ib+(k+1)*lda;
188 const Scalar* A2 = A+ib+(k+2)*lda;
189 const Scalar* A3 = A+ib+(k+3)*lda;
190
191 Scalar* C0 = C+ib+(n_end)*ldc;
192
193 a0 = pload<Packet>(A0);
194 a1 = pload<Packet>(A1);
195 if(RK==4)
196 {
197 a2 = pload<Packet>(A2);
198 a3 = pload<Packet>(A3);
199 }
200 else
201 {
202 // workaround "may be used uninitialized in this function" warning
203 a2 = a3 = a0;
204 }
205
206#define WORK(I) \
207 c0 = pload<Packet>(C0+i+(I)*PacketSize); \
208 KMADD(c0, a0, b00, t0) \
209 a0 = pload<Packet>(A0+i+(I+1)*PacketSize); \
210 KMADD(c0, a1, b10, t0) \
211 a1 = pload<Packet>(A1+i+(I+1)*PacketSize); \
212 if(RK==4){ KMADD(c0, a2, b20, t0) }\
213 if(RK==4){ a2 = pload<Packet>(A2+i+(I+1)*PacketSize); }\
214 if(RK==4){ KMADD(c0, a3, b30, t0) }\
215 if(RK==4){ a3 = pload<Packet>(A3+i+(I+1)*PacketSize); }\
216 pstore(C0+i+(I)*PacketSize, c0);
217
218 // aggressive vectorization and peeling
219 for(Index i=0; i<actual_b_end1; i+=PacketSize*8)
220 {
221 EIGEN_ASM_COMMENT("SPARSELU_GEMML_KERNEL2");
222 WORK(0);
223 WORK(1);
224 WORK(2);
225 WORK(3);
226 WORK(4);
227 WORK(5);
228 WORK(6);
229 WORK(7);
230 }
231 // vectorization only
232 for(Index i=actual_b_end1; i<actual_b_end2; i+=PacketSize)
233 {
234 WORK(0);
235 }
236 // remaining scalars
237 for(Index i=actual_b_end2; i<actual_b; ++i)
238 {
239 if(RK==4)
240 C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1]+A2[i]*Bc0[2]+A3[i]*Bc0[3];
241 else
242 C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1];
243 }
244
245 Bc0 += RK;
246#undef WORK
247 }
248 }
249
250 // process the last columns of A, corresponding to the last rows of B
251 Index rd = d-d_end;
252 if(rd>0)
253 {
254 for(Index j=0; j<n; ++j)
255 {
256 enum {
257 Alignment = PacketSize>1 ? Aligned : 0
258 };
259 typedef Map<Matrix<Scalar,Dynamic,1>, Alignment > MapVector;
260 typedef Map<const Matrix<Scalar,Dynamic,1>, Alignment > ConstMapVector;
261 if(rd==1) MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b);
262
263 else if(rd==2) MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b)
264 + B[1+d_end+j*ldb] * ConstMapVector(A+(d_end+1)*lda+ib, actual_b);
265
266 else MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b)
267 + B[1+d_end+j*ldb] * ConstMapVector(A+(d_end+1)*lda+ib, actual_b)
268 + B[2+d_end+j*ldb] * ConstMapVector(A+(d_end+2)*lda+ib, actual_b);
269 }
270 }
271
272 } // blocking on the rows of A and C
273}
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
Definition PacketMath.h:31
#define EIGEN_ASM_COMMENT(X)
Definition Macros.h:624
#define WORK(I)
A matrix or vector expression mapping an existing array of data.
Definition Map.h:96
@ Aligned
Definition Constants.h:235
Definition LDLT.h:18
EIGEN_DEVICE_FUNC void prefetch(const Scalar *addr)
Definition GenericPacketMath.h:299

References Eigen::Aligned, EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS, EIGEN_ASM_COMMENT, eigen_internal_assert, first_default_aligned(), prefetch(), and WORK.

+ Here is the call graph for this function:

◆ stable_norm_kernel()

template<typename ExpressionType , typename Scalar >
void Eigen::internal::stable_norm_kernel ( const ExpressionType &  bl,
Scalar &  ssq,
Scalar &  scale,
Scalar &  invScale 
)
inline
19{
20 Scalar maxCoeff = bl.cwiseAbs().maxCoeff();
21
22 if(maxCoeff>scale)
23 {
24 ssq = ssq * numext::abs2(scale/maxCoeff);
25 Scalar tmp = Scalar(1)/maxCoeff;
27 {
28 invScale = NumTraits<Scalar>::highest();
29 scale = Scalar(1)/invScale;
30 }
31 else if(maxCoeff>NumTraits<Scalar>::highest()) // we got a INF
32 {
33 invScale = Scalar(1);
34 scale = maxCoeff;
35 }
36 else
37 {
38 scale = maxCoeff;
39 invScale = tmp;
40 }
41 }
42 else if(maxCoeff!=maxCoeff) // we got a NaN
43 {
44 scale = maxCoeff;
45 }
46
47 // TODO if the maxCoeff is much much smaller than the current scale,
48 // then we can neglect this sub vector
49 if(scale>Scalar(0)) // if scale==0, then bl is 0
50 ssq += (bl*invScale).squaredNorm();
51}
int scale(const int val)
Definition WipeTowerDialog.cpp:14

References scale().

Referenced by Eigen::MatrixBase< Derived >::stableNorm().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ swap()

template<typename T >
void Eigen::internal::swap ( scoped_array< T > &  a,
scoped_array< T > &  b 
)
603{
604 std::swap(a.ptr(),b.ptr());
605}

Referenced by Eigen::internal::swap_assign_op< Scalar >::assignCoeff(), and QuickSplit().

+ Here is the caller graph for this function:

◆ throw_std_bad_alloc()

EIGEN_DEVICE_FUNC void Eigen::internal::throw_std_bad_alloc ( )
inline
68{
69 #ifdef EIGEN_EXCEPTIONS
70 throw std::bad_alloc();
71 #else
72 std::size_t huge = static_cast<std::size_t>(-1);
73 ::operator new(huge);
74 #endif
75}

Referenced by aligned_malloc(), aligned_realloc(), check_size_for_overflow(), conditional_aligned_malloc< false >(), Eigen::SparseMatrix< _Scalar, _Options, _StorageIndex >::conservativeResize(), Eigen::SparseMatrix< _Scalar, _Options, _StorageIndex >::reserveInnerVectors(), Eigen::SparseMatrix< _Scalar, _Options, _StorageIndex >::resize(), Eigen::internal::CompressedStorage< _Scalar, _StorageIndex >::resize(), and Eigen::internal::check_rows_cols_for_overflow< Dynamic >::run().

+ Here is the caller graph for this function:

◆ toRotationMatrix() [1/3]

template<typename Scalar , int Dim, typename OtherDerived >
static EIGEN_DEVICE_FUNC const MatrixBase< OtherDerived > & Eigen::internal::toRotationMatrix ( const MatrixBase< OtherDerived > &  mat)
inlinestatic
196{
197 EIGEN_STATIC_ASSERT(OtherDerived::RowsAtCompileTime==Dim && OtherDerived::ColsAtCompileTime==Dim,
198 YOU_MADE_A_PROGRAMMING_MISTAKE)
199 return mat;
200}

References EIGEN_STATIC_ASSERT.

◆ toRotationMatrix() [2/3]

template<typename Scalar , int Dim, typename OtherDerived >
static EIGEN_DEVICE_FUNC Matrix< Scalar, Dim, Dim > Eigen::internal::toRotationMatrix ( const RotationBase< OtherDerived, Dim > &  r)
inlinestatic
190{
191 return r.toRotationMatrix();
192}
EIGEN_DEVICE_FUNC RotationMatrixType toRotationMatrix() const
Definition RotationBase.h:45

References Eigen::RotationBase< Derived, _Dim >::toRotationMatrix().

+ Here is the call graph for this function:

◆ toRotationMatrix() [3/3]

template<typename Scalar , int Dim>
static EIGEN_DEVICE_FUNC Matrix< Scalar, 2, 2 > Eigen::internal::toRotationMatrix ( const Scalar &  s)
inlinestatic
183{
184 EIGEN_STATIC_ASSERT(Dim==2,YOU_MADE_A_PROGRAMMING_MISTAKE)
186}
EIGEN_DEVICE_FUNC Matrix2 toRotationMatrix() const
Definition Rotation2D.h:188
Represents a rotation/orientation in a 2 dimensional space.
Definition Rotation2D.h:42

References EIGEN_STATIC_ASSERT, and Eigen::Rotation2D< _Scalar >::toRotationMatrix().

+ Here is the call graph for this function:

◆ treePostorder()

template<typename IndexVector >
void Eigen::internal::treePostorder ( typename IndexVector::Scalar  n,
IndexVector &  parent,
IndexVector &  post 
)

Post order a tree.

Parameters
n	the number of nodes
parent	Input tree
post	postordered tree
179{
180 typedef typename IndexVector::Scalar StorageIndex;
181 IndexVector first_kid, next_kid; // Linked list of children
182 StorageIndex postnum;
183 // Allocate storage for working arrays and results
184 first_kid.resize(n+1);
185 next_kid.setZero(n+1);
186 post.setZero(n+1);
187
188 // Set up structure describing children
189 first_kid.setConstant(-1);
190 for (StorageIndex v = n-1; v >= 0; v--)
191 {
192 StorageIndex dad = parent(v);
193 next_kid(v) = first_kid(dad);
194 first_kid(dad) = v;
195 }
196
197 // Depth-first search from dummy root vertex #n
198 postnum = 0;
199 internal::nr_etdfs(n, parent, first_kid, next_kid, post, postnum);
200}

References nr_etdfs().

Referenced by Eigen::SparseLU< _MatrixType, _OrderingType >::analyzePattern(), and Eigen::internal::SparseLUImpl< Scalar, StorageIndex >::heap_relax_snode().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ tridiagonal_qr_step()

template<int StorageOrder, typename RealScalar , typename Scalar , typename Index >
static EIGEN_DEVICE_FUNC void Eigen::internal::tridiagonal_qr_step ( RealScalar *  diag,
RealScalar *  subdiag,
Index  start,
Index  end,
Scalar *  matrixQ,
Index  n 
)
static
809{
810 using std::abs;
811 RealScalar td = (diag[end-1] - diag[end])*RealScalar(0.5);
812 RealScalar e = subdiag[end-1];
813 // Note that thanks to scaling, e^2 or td^2 cannot overflow, however they can still
814 // underflow thus leading to inf/NaN values when using the following commented code:
815// RealScalar e2 = numext::abs2(subdiag[end-1]);
816// RealScalar mu = diag[end] - e2 / (td + (td>0 ? 1 : -1) * sqrt(td*td + e2));
817 // This explains the following, somewhat more complicated, version:
818 RealScalar mu = diag[end];
819 if(td==RealScalar(0))
820 mu -= abs(e);
821 else
822 {
823 RealScalar e2 = numext::abs2(subdiag[end-1]);
824 RealScalar h = numext::hypot(td,e);
825 if(e2==RealScalar(0)) mu -= (e / (td + (td>RealScalar(0) ? RealScalar(1) : RealScalar(-1)))) * (e / h);
826 else mu -= e2 / (td + (td>RealScalar(0) ? h : -h));
827 }
828
829 RealScalar x = diag[start] - mu;
830 RealScalar z = subdiag[start];
831 for (Index k = start; k < end; ++k)
832 {
834 rot.makeGivens(x, z);
835
836 // do T = G' T G
837 RealScalar sdk = rot.s() * diag[k] + rot.c() * subdiag[k];
838 RealScalar dkp1 = rot.s() * subdiag[k] + rot.c() * diag[k+1];
839
840 diag[k] = rot.c() * (rot.c() * diag[k] - rot.s() * subdiag[k]) - rot.s() * (rot.c() * subdiag[k] - rot.s() * diag[k+1]);
841 diag[k+1] = rot.s() * sdk + rot.c() * dkp1;
842 subdiag[k] = rot.c() * sdk - rot.s() * dkp1;
843
844
845 if (k > start)
846 subdiag[k - 1] = rot.c() * subdiag[k-1] - rot.s() * z;
847
848 x = subdiag[k];
849
850 if (k < end - 1)
851 {
852 z = -rot.s() * subdiag[k+1];
853 subdiag[k + 1] = rot.c() * subdiag[k+1];
854 }
855
856 // apply the givens rotation to the unit matrix Q = Q * G
857 if (matrixQ)
858 {
859 // FIXME if StorageOrder == RowMajor this operation is not very efficient
860 Map<Matrix<Scalar,Dynamic,Dynamic,StorageOrder> > q(matrixQ,n,n);
861 q.applyOnTheRight(k,k+1,rot);
862 }
863 }
864}

References Eigen::JacobiRotation< Scalar >::c(), Eigen::end(), Eigen::JacobiRotation< Scalar >::makeGivens(), and Eigen::JacobiRotation< Scalar >::s().

+ Here is the call graph for this function:

◆ tridiagonalization_inplace() [1/2]

template<typename MatrixType , typename DiagonalType , typename SubDiagonalType >
void Eigen::internal::tridiagonalization_inplace ( MatrixType &  mat,
DiagonalType &  diag,
SubDiagonalType &  subdiag,
bool  extractQ 
)

Performs a full tridiagonalization in place.

Parameters
[in,out]	mat	On input, the selfadjoint matrix whose tridiagonal decomposition is to be computed. Only the lower triangular part is referenced. The rest is left unchanged. On output, the orthogonal matrix Q in the decomposition if extractQ is true.
[out]	diag	The diagonal of the tridiagonal matrix T in the decomposition.
[out]	subdiag	The subdiagonal of the tridiagonal matrix T in the decomposition.
[in]	extractQ	If true, the orthogonal matrix Q in the decomposition is computed and stored in mat.

Computes the tridiagonal decomposition of the selfadjoint matrix mat in place such that $ mat = Q T Q^* $ where $ Q $ is unitary and $ T $ a real symmetric tridiagonal matrix.

The tridiagonal matrix T is passed to the output parameters diag and subdiag. If extractQ is true, then the orthogonal matrix Q is passed to mat. Otherwise the lower part of the matrix mat is destroyed.

The vectors diag and subdiag are not resized. The function assumes that they are already of the correct size. The length of the vector diag should equal the number of rows in mat, and the length of the vector subdiag should be one less.

This implementation contains an optimized path for 3-by-3 matrices which is especially useful for plane fitting.

Note
Currently, it requires two temporary vectors to hold the intermediate Householder coefficients, and to reconstruct the matrix Q from the Householder reflectors.

Example (this uses the same matrix as the example in Tridiagonalization::Tridiagonalization(const MatrixType&)):

Output:

See also
class Tridiagonalization
428{
429 eigen_assert(mat.cols()==mat.rows() && diag.size()==mat.rows() && subdiag.size()==mat.rows()-1);
430 tridiagonalization_inplace_selector<MatrixType>::run(mat, diag, subdiag, extractQ);
431}
Definition Tridiagonalization.h:438

References eigen_assert, and tridiagonalization_inplace().

+ Here is the call graph for this function:

◆ tridiagonalization_inplace() [2/2]

template<typename MatrixType , typename CoeffVectorType >
void Eigen::internal::tridiagonalization_inplace ( MatrixType &  matA,
CoeffVectorType &  hCoeffs 
)
348{
349 using numext::conj;
350 typedef typename MatrixType::Scalar Scalar;
351 typedef typename MatrixType::RealScalar RealScalar;
352 Index n = matA.rows();
353 eigen_assert(n==matA.cols());
354 eigen_assert(n==hCoeffs.size()+1 || n==1);
355
356 for (Index i = 0; i<n-1; ++i)
357 {
358 Index remainingSize = n-i-1;
359 RealScalar beta;
360 Scalar h;
361 matA.col(i).tail(remainingSize).makeHouseholderInPlace(h, beta);
362
363 // Apply similarity transformation to remaining columns,
364 // i.e., A = H A H' where H = I - h v v' and v = matA.col(i).tail(n-i-1)
365 matA.col(i).coeffRef(i+1) = 1;
366
367 hCoeffs.tail(n-i-1).noalias() = (matA.bottomRightCorner(remainingSize,remainingSize).template selfadjointView<Lower>()
368 * (conj(h) * matA.col(i).tail(remainingSize)));
369
370 hCoeffs.tail(n-i-1) += (conj(h)*RealScalar(-0.5)*(hCoeffs.tail(remainingSize).dot(matA.col(i).tail(remainingSize)))) * matA.col(i).tail(n-i-1);
371
372 matA.bottomRightCorner(remainingSize, remainingSize).template selfadjointView<Lower>()
373 .rankUpdate(matA.col(i).tail(remainingSize), hCoeffs.tail(remainingSize), Scalar(-1));
374
375 matA.col(i).coeffRef(i+1) = beta;
376 hCoeffs.coeffRef(i) = h;
377 }
378}

References eigen_assert, and tridiagonalization_inplace().

Referenced by Eigen::Tridiagonalization< _MatrixType >::Tridiagonalization(), Eigen::SelfAdjointEigenSolver< _MatrixType >::compute(), Eigen::Tridiagonalization< _MatrixType >::compute(), Eigen::internal::tridiagonalization_inplace_selector< MatrixType, Size, IsComplex >::run(), tridiagonalization_inplace(), and tridiagonalization_inplace().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ upperbidiagonalization_blocked_helper()

template<typename MatrixType >
void Eigen::internal::upperbidiagonalization_blocked_helper ( MatrixType &  A,
typename MatrixType::RealScalar *  diagonal,
typename MatrixType::RealScalar *  upper_diagonal,
Index  bs,
Ref< Matrix< typename MatrixType::Scalar, Dynamic, Dynamic, traits< MatrixType >::Flags &RowMajorBit > >  X,
Ref< Matrix< typename MatrixType::Scalar, Dynamic, Dynamic, traits< MatrixType >::Flags &RowMajorBit > >  Y 
)
160{
161 typedef typename MatrixType::Scalar Scalar;
162 typedef typename MatrixType::RealScalar RealScalar;
163 typedef typename NumTraits<RealScalar>::Literal Literal;
164 enum { StorageOrder = traits<MatrixType>::Flags & RowMajorBit };
165 typedef InnerStride<int(StorageOrder) == int(ColMajor) ? 1 : Dynamic> ColInnerStride;
166 typedef InnerStride<int(StorageOrder) == int(ColMajor) ? Dynamic : 1> RowInnerStride;
167 typedef Ref<Matrix<Scalar, Dynamic, 1>, 0, ColInnerStride> SubColumnType;
168 typedef Ref<Matrix<Scalar, 1, Dynamic>, 0, RowInnerStride> SubRowType;
170
171 Index brows = A.rows();
172 Index bcols = A.cols();
173
174 Scalar tau_u, tau_u_prev(0), tau_v;
175
176 for(Index k = 0; k < bs; ++k)
177 {
178 Index remainingRows = brows - k;
179 Index remainingCols = bcols - k - 1;
180
181 SubMatType X_k1( X.block(k,0, remainingRows,k) );
182 SubMatType V_k1( A.block(k,0, remainingRows,k) );
183
184 // 1 - update the k-th column of A
185 SubColumnType v_k = A.col(k).tail(remainingRows);
186 v_k -= V_k1 * Y.row(k).head(k).adjoint();
187 if(k) v_k -= X_k1 * A.col(k).head(k);
188
189 // 2 - construct left Householder transform in-place
190 v_k.makeHouseholderInPlace(tau_v, diagonal[k]);
191
192 if(k+1<bcols)
193 {
194 SubMatType Y_k ( Y.block(k+1,0, remainingCols, k+1) );
195 SubMatType U_k1 ( A.block(0,k+1, k,remainingCols) );
196
 197 // this eases the application of Householder transformations
198 // A(k,k) will store tau_v later
199 A(k,k) = Scalar(1);
200
201 // 3 - Compute y_k^T = tau_v * ( A^T*v_k - Y_k-1*V_k-1^T*v_k - U_k-1*X_k-1^T*v_k )
202 {
203 SubColumnType y_k( Y.col(k).tail(remainingCols) );
204
205 // let's use the begining of column k of Y as a temporary vector
206 SubColumnType tmp( Y.col(k).head(k) );
207 y_k.noalias() = A.block(k,k+1, remainingRows,remainingCols).adjoint() * v_k; // bottleneck
208 tmp.noalias() = V_k1.adjoint() * v_k;
209 y_k.noalias() -= Y_k.leftCols(k) * tmp;
210 tmp.noalias() = X_k1.adjoint() * v_k;
211 y_k.noalias() -= U_k1.adjoint() * tmp;
212 y_k *= numext::conj(tau_v);
213 }
214
215 // 4 - update k-th row of A (it will become u_k)
216 SubRowType u_k( A.row(k).tail(remainingCols) );
217 u_k = u_k.conjugate();
218 {
219 u_k -= Y_k * A.row(k).head(k+1).adjoint();
220 if(k) u_k -= U_k1.adjoint() * X.row(k).head(k).adjoint();
221 }
222
223 // 5 - construct right Householder transform in-place
224 u_k.makeHouseholderInPlace(tau_u, upper_diagonal[k]);
225
226 // this eases the application of Householder transformations
227 // A(k,k+1) will store tau_u later
228 A(k,k+1) = Scalar(1);
229
230 // 6 - Compute x_k = tau_u * ( A*u_k - X_k-1*U_k-1^T*u_k - V_k*Y_k^T*u_k )
231 {
232 SubColumnType x_k ( X.col(k).tail(remainingRows-1) );
233
 234 // let's use the beginning of column k of X as temporary vectors
 235 // note that tmp0 and tmp1 overlap
236 SubColumnType tmp0 ( X.col(k).head(k) ),
237 tmp1 ( X.col(k).head(k+1) );
238
239 x_k.noalias() = A.block(k+1,k+1, remainingRows-1,remainingCols) * u_k.transpose(); // bottleneck
240 tmp0.noalias() = U_k1 * u_k.transpose();
241 x_k.noalias() -= X_k1.bottomRows(remainingRows-1) * tmp0;
242 tmp1.noalias() = Y_k.adjoint() * u_k.transpose();
243 x_k.noalias() -= A.block(k+1,0, remainingRows-1,k+1) * tmp1;
244 x_k *= numext::conj(tau_u);
245 tau_u = numext::conj(tau_u);
246 u_k = u_k.conjugate();
247 }
248
249 if(k>0) A.coeffRef(k-1,k) = tau_u_prev;
250 tau_u_prev = tau_u;
251 }
252 else
253 A.coeffRef(k-1,k) = tau_u_prev;
254
255 A.coeffRef(k,k) = tau_v;
256 }
257
258 if(bs<bcols)
259 A.coeffRef(bs-1,bs) = tau_u_prev;
260
261 // update A22
262 if(bcols>bs && brows>bs)
263 {
264 SubMatType A11( A.bottomRightCorner(brows-bs,bcols-bs) );
265 SubMatType A10( A.block(bs,0, brows-bs,bs) );
266 SubMatType A01( A.block(0,bs, bs,bcols-bs) );
267 Scalar tmp = A01(bs-1,0);
268 A01(bs-1,0) = Literal(1);
269 A11.noalias() -= A10 * Y.topLeftCorner(bcols,bs).bottomRows(bcols-bs).adjoint();
270 A11.noalias() -= X.topLeftCorner(brows,bs).bottomRows(brows-bs) * A01;
271 A01(bs-1,0) = tmp;
272 }
273}
Convenience specialization of Stride to specify only an inner stride See class Map for some examples.
Definition Stride.h:91
A matrix or vector expression mapping an existing expression.
Definition Ref.h:194
@ Y
Definition libslic3r.h:99
@ X
Definition libslic3r.h:98
Definition ForwardDeclarations.h:17

References Eigen::ColMajor, Eigen::Dynamic, and Eigen::RowMajorBit.

◆ upperbidiagonalization_inplace_blocked()

template<typename MatrixType , typename BidiagType >
void Eigen::internal::upperbidiagonalization_inplace_blocked ( MatrixType &  A,
BidiagType &  bidiagonal,
Index  maxBlockSize = 32,
typename MatrixType::Scalar *  = 0 
)
287{
288 typedef typename MatrixType::Scalar Scalar;
289 typedef Block<MatrixType,Dynamic,Dynamic> BlockType;
290
291 Index rows = A.rows();
292 Index cols = A.cols();
293 Index size = (std::min)(rows, cols);
294
295 // X and Y are work space
296 enum { StorageOrder = traits<MatrixType>::Flags & RowMajorBit };
297 Matrix<Scalar,
298 MatrixType::RowsAtCompileTime,
299 Dynamic,
300 StorageOrder,
301 MatrixType::MaxRowsAtCompileTime> X(rows,maxBlockSize);
302 Matrix<Scalar,
303 MatrixType::ColsAtCompileTime,
304 Dynamic,
305 StorageOrder,
306 MatrixType::MaxColsAtCompileTime> Y(cols,maxBlockSize);
307 Index blockSize = (std::min)(maxBlockSize,size);
308
309 Index k = 0;
310 for(k = 0; k < size; k += blockSize)
311 {
312 Index bs = (std::min)(size-k,blockSize); // actual size of the block
313 Index brows = rows - k; // rows of the block
314 Index bcols = cols - k; // columns of the block
315
316 // partition the matrix A:
317 //
318 // | A00 A01 A02 |
319 // | |
320 // A = | A10 A11 A12 |
321 // | |
322 // | A20 A21 A22 |
323 //
324 // where A11 is a bs x bs diagonal block,
325 // and let:
326 // | A11 A12 |
327 // B = | |
328 // | A21 A22 |
329
330 BlockType B = A.block(k,k,brows,bcols);
331
332 // This stage performs the bidiagonalization of A11, A21, A12, and updating of A22.
 333 // Finally, the algorithm continues on the updated A22.
334 //
335 // However, if B is too small, or A22 empty, then let's use an unblocked strategy
336 if(k+bs==cols || bcols<48) // somewhat arbitrary threshold
337 {
338 upperbidiagonalization_inplace_unblocked(B,
339 &(bidiagonal.template diagonal<0>().coeffRef(k)),
340 &(bidiagonal.template diagonal<1>().coeffRef(k)),
341 X.data()
342 );
343 break; // We're done
344 }
345 else
346 {
347 upperbidiagonalization_blocked_helper<BlockType>( B,
348 &(bidiagonal.template diagonal<0>().coeffRef(k)),
349 &(bidiagonal.template diagonal<1>().coeffRef(k)),
350 bs,
351 X.topLeftCorner(brows,bs),
352 Y.topLeftCorner(bcols,bs)
353 );
354 }
355 }
356}
Expression of a fixed-size or dynamic-size block.
Definition Block.h:105

References Eigen::Dynamic, Eigen::RowMajorBit, and upperbidiagonalization_inplace_unblocked().

Referenced by Eigen::internal::UpperBidiagonalization< _MatrixType >::compute().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ upperbidiagonalization_inplace_unblocked()

template<typename MatrixType >
void Eigen::internal::upperbidiagonalization_inplace_unblocked ( MatrixType &  mat,
typename MatrixType::RealScalar *  diagonal,
typename MatrixType::RealScalar *  upper_diagonal,
typename MatrixType::Scalar *  tempData = 0 
)
97{
98 typedef typename MatrixType::Scalar Scalar;
99
100 Index rows = mat.rows();
101 Index cols = mat.cols();
102
104 TempType tempVector;
105 if(tempData==0)
106 {
107 tempVector.resize(rows);
108 tempData = tempVector.data();
109 }
110
111 for (Index k = 0; /* breaks at k==cols-1 below */ ; ++k)
112 {
113 Index remainingRows = rows - k;
114 Index remainingCols = cols - k - 1;
115
116 // construct left householder transform in-place in A
117 mat.col(k).tail(remainingRows)
118 .makeHouseholderInPlace(mat.coeffRef(k,k), diagonal[k]);
119 // apply householder transform to remaining part of A on the left
120 mat.bottomRightCorner(remainingRows, remainingCols)
121 .applyHouseholderOnTheLeft(mat.col(k).tail(remainingRows-1), mat.coeff(k,k), tempData);
122
123 if(k == cols-1) break;
124
125 // construct right householder transform in-place in mat
126 mat.row(k).tail(remainingCols)
127 .makeHouseholderInPlace(mat.coeffRef(k,k+1), upper_diagonal[k]);
128 // apply householder transform to remaining part of mat on the left
129 mat.bottomRightCorner(remainingRows-1, remainingCols)
130 .applyHouseholderOnTheRight(mat.row(k).tail(remainingCols-1).transpose(), mat.coeff(k,k+1), tempData);
131 }
132}

References Eigen::PlainObjectBase< Derived >::resize().

Referenced by Eigen::internal::UpperBidiagonalization< _MatrixType >::computeUnblocked(), and upperbidiagonalization_inplace_blocked().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ useSpecificBlockingSizes()

template<typename Index >
bool Eigen::internal::useSpecificBlockingSizes ( Index k,
Index m,
Index n 
)
inline
264{
265#ifdef EIGEN_TEST_SPECIFIC_BLOCKING_SIZES
266 if (EIGEN_TEST_SPECIFIC_BLOCKING_SIZES) {
267 k = numext::mini<Index>(k, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_K);
268 m = numext::mini<Index>(m, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_M);
269 n = numext::mini<Index>(n, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_N);
270 return true;
271 }
272#else
276#endif
277 return false;
278}

References EIGEN_UNUSED_VARIABLE.

Referenced by computeProductBlockingSizes().

+ Here is the caller graph for this function:

◆ vec_splat_packet4f()

template<int element>
EIGEN_STRONG_INLINE Packet4f Eigen::internal::vec_splat_packet4f ( const Packet4f from)
264{
265 Packet4f splat;
266 switch (element) {
267 case 0:
268 splat.v4f[0] = vec_splat(from.v4f[0], 0);
269 splat.v4f[1] = splat.v4f[0];
270 break;
271 case 1:
272 splat.v4f[0] = vec_splat(from.v4f[0], 1);
273 splat.v4f[1] = splat.v4f[0];
274 break;
275 case 2:
276 splat.v4f[0] = vec_splat(from.v4f[1], 0);
277 splat.v4f[1] = splat.v4f[0];
278 break;
279 case 3:
280 splat.v4f[0] = vec_splat(from.v4f[1], 1);
281 splat.v4f[1] = splat.v4f[0];
282 break;
283 }
284 return splat;
285}

References Eigen::internal::Packet4f::v4f.

Variable Documentation

◆ defaultL1CacheSize

◆ defaultL2CacheSize

const std::ptrdiff_t Eigen::internal::defaultL2CacheSize = 512*1024

◆ defaultL3CacheSize

const std::ptrdiff_t Eigen::internal::defaultL3CacheSize = 512*1024

◆ p16uc_COMPLEX32_REV

Packet16uc Eigen::internal::p16uc_COMPLEX32_REV = vec_sld(p16uc_REVERSE32, p16uc_REVERSE32, 8)
static

◆ p16uc_COMPLEX32_REV2

Packet16uc Eigen::internal::p16uc_COMPLEX32_REV2 = vec_sld(p16uc_PSET64_HI, p16uc_PSET64_LO, 8)
static

Referenced by preverse().

◆ p16uc_DUPLICATE32_HI [1/2]

Packet16uc Eigen::internal::p16uc_DUPLICATE32_HI = { 0,1,2,3, 0,1,2,3, 4,5,6,7, 4,5,6,7 }
static

◆ p16uc_DUPLICATE32_HI [2/2]

Packet16uc Eigen::internal::p16uc_DUPLICATE32_HI = { 0,1,2,3, 0,1,2,3, 4,5,6,7, 4,5,6,7 }
static

◆ p16uc_FORWARD [1/2]

Packet16uc Eigen::internal::p16uc_FORWARD = p16uc_REVERSE32
static

◆ p16uc_FORWARD [2/2]

Packet16uc Eigen::internal::p16uc_FORWARD = { 0,1,2,3, 4,5,6,7, 8,9,10,11, 12,13,14,15 }
static

◆ p16uc_HALF64_0_16

Packet16uc Eigen::internal::p16uc_HALF64_0_16 = vec_sld(vec_splat((Packet16uc) vec_abs(p4i_MINUS16), 0), (Packet16uc)p4i_ZERO, 8)
static

◆ p16uc_PSET32_WEVEN [1/2]

Packet16uc Eigen::internal::p16uc_PSET32_WEVEN = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 0), (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 2), 8)
static

Referenced by pmul< Packet2cf >().

◆ p16uc_PSET32_WEVEN [2/2]

Packet16uc Eigen::internal::p16uc_PSET32_WEVEN = vec_sld(p16uc_DUPLICATE32_HI, (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 3), 8)
static

◆ p16uc_PSET32_WODD [1/2]

Packet16uc Eigen::internal::p16uc_PSET32_WODD = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 1), (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 3), 8)
static

Referenced by pmul< Packet2cf >().

◆ p16uc_PSET32_WODD [2/2]

Packet16uc Eigen::internal::p16uc_PSET32_WODD = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 0), (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 2), 8)
static

◆ p16uc_PSET64_HI [1/2]

Packet16uc Eigen::internal::p16uc_PSET64_HI = (Packet16uc) vec_mergeh((Packet4ui)p16uc_PSET32_WODD, (Packet4ui)p16uc_PSET32_WEVEN)
static

◆ p16uc_PSET64_HI [2/2]

Packet16uc Eigen::internal::p16uc_PSET64_HI = { 0,1,2,3, 4,5,6,7, 0,1,2,3, 4,5,6,7 }
static

◆ p16uc_PSET64_LO [1/2]

Packet16uc Eigen::internal::p16uc_PSET64_LO = (Packet16uc) vec_mergel((Packet4ui)p16uc_PSET32_WODD, (Packet4ui)p16uc_PSET32_WEVEN)
static

Referenced by pmul< Packet1cd >().

◆ p16uc_PSET64_LO [2/2]

Packet16uc Eigen::internal::p16uc_PSET64_LO = (Packet16uc) vec_mergel((Packet4ui)p16uc_PSET32_WODD, (Packet4ui)p16uc_PSET32_WEVEN)
static

◆ p16uc_REVERSE32 [1/2]

Packet16uc Eigen::internal::p16uc_REVERSE32 = { 12,13,14,15, 8,9,10,11, 4,5,6,7, 0,1,2,3 }
static

Referenced by preverse(), and preverse().

◆ p16uc_REVERSE32 [2/2]

Packet16uc Eigen::internal::p16uc_REVERSE32 = { 12,13,14,15, 8,9,10,11, 4,5,6,7, 0,1,2,3 }
static

◆ p16uc_REVERSE64 [1/2]

Packet16uc Eigen::internal::p16uc_REVERSE64 = { 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 }
static

Referenced by pdiv< Packet1cd >(), and preverse().

◆ p16uc_REVERSE64 [2/2]

Packet16uc Eigen::internal::p16uc_REVERSE64 = { 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 }
static

◆ p16uc_TRANSPOSE64_HI [1/2]

Packet16uc Eigen::internal::p16uc_TRANSPOSE64_HI = p16uc_PSET64_HI + p16uc_HALF64_0_16
static

Referenced by ptranspose(), and ptranspose().

◆ p16uc_TRANSPOSE64_HI [2/2]

Packet16uc Eigen::internal::p16uc_TRANSPOSE64_HI = { 0,1,2,3, 4,5,6,7, 16,17,18,19, 20,21,22,23}
static

◆ p16uc_TRANSPOSE64_LO [1/2]

Packet16uc Eigen::internal::p16uc_TRANSPOSE64_LO = p16uc_PSET64_LO + p16uc_HALF64_0_16
static

Referenced by ptranspose(), and ptranspose().

◆ p16uc_TRANSPOSE64_LO [2/2]

Packet16uc Eigen::internal::p16uc_TRANSPOSE64_LO = { 8,9,10,11, 12,13,14,15, 24,25,26,27, 28,29,30,31}
static

◆ p2d_COUNTDOWN

Packet2d Eigen::internal::p2d_COUNTDOWN = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet16uc>(p2d_ZERO), reinterpret_cast<Packet16uc>(p2d_ONE), 8))
static

Referenced by plset< Packet2d >().

◆ p2d_ONE

Packet2d Eigen::internal::p2d_ONE = { 1.0, 1.0 }
static

◆ p2d_ZERO_

Packet2d Eigen::internal::p2d_ZERO_ = { -0.0, -0.0 }
static

Referenced by pdiv< Packet1cd >().

◆ p2ul_CONJ_XOR1

Packet2ul Eigen::internal::p2ul_CONJ_XOR1 = (Packet2ul) vec_sld((Packet4ui) p2d_ZERO_, (Packet4ui) p2l_ZERO, 8)
static

Referenced by pmul< Packet1cd >().

◆ p2ul_CONJ_XOR2

Packet2ul Eigen::internal::p2ul_CONJ_XOR2 = (Packet2ul) vec_sld((Packet4ui) p2l_ZERO, (Packet4ui) p2d_ZERO_, 8)
static

Referenced by pconj().

◆ p4f_COUNTDOWN [1/2]

Packet4f Eigen::internal::p4f_COUNTDOWN = { 0.0, 1.0, 2.0, 3.0 }
static

Referenced by plset< Packet4f >().

◆ p4f_COUNTDOWN [2/2]

Packet4f Eigen::internal::p4f_COUNTDOWN = { 0.0, 1.0, 2.0, 3.0 }
static

◆ p4f_MZERO

Packet4f Eigen::internal::p4f_MZERO = (Packet4f) vec_sl((Packet4ui)p4i_MINUS1, (Packet4ui)p4i_MINUS1)
static

◆ p4f_ONE

Packet4f Eigen::internal::p4f_ONE = vec_ctf(p4i_ONE, 0)
static

Referenced by pdiv< Packet4f >().

◆ p4i_COUNTDOWN [1/2]

Packet4i Eigen::internal::p4i_COUNTDOWN = { 0, 1, 2, 3 }
static

Referenced by plset< Packet4i >().

◆ p4i_COUNTDOWN [2/2]

Packet4i Eigen::internal::p4i_COUNTDOWN = { 0, 1, 2, 3 }
static

◆ p4ui_CONJ_XOR

uint32x4_t Eigen::internal::p4ui_CONJ_XOR = vec_mergeh((Packet4ui)p4i_ZERO, (Packet4ui)p4f_MZERO)
inlinestatic

Referenced by pconj(), and pmul< Packet2cf >().

◆ y

const Scalar & Eigen::internal::y
Initial value:
{
return EIGEN_MATHFUNC_IMPL(random, Scalar)::run(x, y)

Referenced by Eigen::MatrixBase< Homogeneous< MatrixType, _Direction > >::applyOnTheRight(), bicgstab(), conservative_sparse_sparse_product_impl(), Eigen::numext::equal_strict(), Eigen::numext::equal_strict(), Eigen::numext::equal_strict(), Eigen::internal::scalar_fuzzy_impl< bool >::isApprox(), Eigen::internal::scalar_fuzzy_default_impl< Scalar, false, true >::isApprox(), Eigen::internal::scalar_fuzzy_default_impl< Scalar, false, false >::isApprox(), Eigen::internal::scalar_fuzzy_default_impl< Scalar, true, false >::isApprox(), isApprox(), Eigen::internal::scalar_fuzzy_impl< bool >::isApproxOrLessThan(), Eigen::internal::scalar_fuzzy_default_impl< Scalar, false, true >::isApproxOrLessThan(), Eigen::internal::scalar_fuzzy_default_impl< Scalar, false, false >::isApproxOrLessThan(), isApproxOrLessThan(), Eigen::internal::scalar_fuzzy_default_impl< Scalar, false, false >::isMuchSmallerThan(), Eigen::internal::scalar_fuzzy_default_impl< Scalar, true, false >::isMuchSmallerThan(), isMuchSmallerThan(), llt_rank_update_lower(), Eigen::numext::not_equal_strict(), Eigen::numext::not_equal_strict(), Eigen::numext::not_equal_strict(), Eigen::internal::scalar_hypot_op< Scalar, Scalar >::operator()(), pcos< Packet4f >(), pexp< Packet4f >(), pexp< Packet8f >(), plog< Packet4f >(), plog< Packet8f >(), Eigen::internal::conj_helper< LhsScalar, RhsScalar, ConjLhs, ConjRhs >::pmadd(), Eigen::internal::conj_helper< Packet1cd, Packet1cd, false, true >::pmadd(), Eigen::internal::conj_helper< Packet1cd, Packet1cd, true, false >::pmadd(), Eigen::internal::conj_helper< Packet1cd, Packet1cd, true, true >::pmadd(), Eigen::internal::conj_helper< Packet2cd, Packet2cd, false, true >::pmadd(), Eigen::internal::conj_helper< Packet2cd, Packet2cd, true, false >::pmadd(), Eigen::internal::conj_helper< Packet2cd, Packet2cd, true, true >::pmadd(), Eigen::internal::conj_helper< Packet2cf, Packet2cf, false, true >::pmadd(), Eigen::internal::conj_helper< Packet2cf, Packet2cf, true, false >::pmadd(), 
Eigen::internal::conj_helper< Packet2cf, Packet2cf, true, true >::pmadd(), Eigen::internal::conj_helper< Packet4cf, Packet4cf, false, true >::pmadd(), Eigen::internal::conj_helper< Packet4cf, Packet4cf, true, false >::pmadd(), Eigen::internal::conj_helper< Packet4cf, Packet4cf, true, true >::pmadd(), Eigen::internal::conj_helper< RealScalar, std::complex< RealScalar >, false, Conj >::pmadd(), Eigen::internal::conj_helper< std::complex< RealScalar >, RealScalar, Conj, false >::pmadd(), Eigen::internal::conj_helper< Scalar, Scalar, false, false >::pmadd(), Eigen::internal::conj_helper< std::complex< RealScalar >, std::complex< RealScalar >, false, true >::pmadd(), Eigen::internal::conj_helper< std::complex< RealScalar >, std::complex< RealScalar >, true, false >::pmadd(), Eigen::internal::conj_helper< std::complex< RealScalar >, std::complex< RealScalar >, true, true >::pmadd(), Eigen::internal::conj_helper< LhsScalar, RhsScalar, ConjLhs, ConjRhs >::pmul(), Eigen::internal::conj_helper< RealScalar, std::complex< RealScalar >, false, Conj >::pmul(), Eigen::internal::conj_helper< std::complex< RealScalar >, RealScalar, Conj, false >::pmul(), Eigen::internal::conj_helper< Scalar, Scalar, false, false >::pmul(), Eigen::internal::conj_helper< std::complex< RealScalar >, std::complex< RealScalar >, false, true >::pmul(), Eigen::internal::conj_helper< std::complex< RealScalar >, std::complex< RealScalar >, true, false >::pmul(), Eigen::internal::conj_helper< std::complex< RealScalar >, std::complex< RealScalar >, true, true >::pmul(), positive_real_hypot(), psin< Packet4f >(), Eigen::internal::isApprox_selector< Derived, OtherDerived, true >::run(), Eigen::internal::isApprox_selector< Derived, OtherDerived, is_integer >::run(), Eigen::internal::isMuchSmallerThan_object_selector< Derived, OtherDerived, is_integer >::run(), Eigen::internal::isMuchSmallerThan_scalar_selector< Derived, is_integer >::run(), Eigen::internal::random_default_impl< half, false, false >::run(), 
Eigen::internal::random_default_impl< Scalar, false, false >::run(), Eigen::internal::random_default_impl< Scalar, false, true >::run(), Eigen::internal::random_default_impl< Scalar, true, false >::run(), Eigen::internal::hypot_impl< Scalar >::run(), Eigen::internal::pow_impl< ScalarX, ScalarY, IsInteger >::run(), Eigen::internal::apply_rotation_in_the_plane_selector< Scalar, OtherScalar, SizeAtCompileTime, MinAlignment, Vectorizable >::run(), Eigen::internal::apply_rotation_in_the_plane_selector< Scalar, OtherScalar, SizeAtCompileTime, MinAlignment, true >::run(), Eigen::internal::pow_impl< ScalarX, ScalarY, true >::run(), and sparse_sparse_to_dense_product_impl().