Redux.h

// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_REDUX_H
#define EIGEN_REDUX_H

namespace Eigen {

namespace internal {
// TODO
// * implement other kinds of vectorization
// * factorize code
/***************************************************************************
* Part 1 : the logic deciding a strategy for vectorization and unrolling
***************************************************************************/

template<typename Func, typename Derived>
struct redux_traits
{
public:
  typedef typename find_best_packet<typename Derived::Scalar,Derived::SizeAtCompileTime>::type PacketType;
  enum {
    PacketSize = unpacket_traits<PacketType>::size,
    InnerMaxSize = int(Derived::IsRowMajor)
                 ? Derived::MaxColsAtCompileTime
                 : Derived::MaxRowsAtCompileTime
  };

  enum {
    MightVectorize = (int(Derived::Flags)&ActualPacketAccessBit)
                  && (functor_traits<Func>::PacketAccess),
    MayLinearVectorize = bool(MightVectorize) && (int(Derived::Flags)&LinearAccessBit),
    MaySliceVectorize  = bool(MightVectorize) && int(InnerMaxSize)>=3*PacketSize
  };

public:
  enum {
    Traversal = int(MayLinearVectorize) ? int(LinearVectorizedTraversal)
              : int(MaySliceVectorize)  ? int(SliceVectorizedTraversal)
                                        : int(DefaultTraversal)
  };

public:
  enum {
    Cost = Derived::SizeAtCompileTime == Dynamic ? HugeCost
         : Derived::SizeAtCompileTime * Derived::CoeffReadCost + (Derived::SizeAtCompileTime-1) * functor_traits<Func>::Cost,
    UnrollingLimit = EIGEN_UNROLLING_LIMIT * (int(Traversal) == int(DefaultTraversal) ? 1 : int(PacketSize))
  };

public:
  enum {
    Unrolling = Cost <= UnrollingLimit ? CompleteUnrolling : NoUnrolling
  };

#ifdef EIGEN_DEBUG_ASSIGN
  static void debug()
  {
    std::cerr << "Xpr: " << typeid(typename Derived::XprType).name() << std::endl;
    std::cerr.setf(std::ios::hex, std::ios::basefield);
    EIGEN_DEBUG_VAR(Derived::Flags)
    std::cerr.unsetf(std::ios::hex);
    EIGEN_DEBUG_VAR(InnerMaxSize)
    EIGEN_DEBUG_VAR(PacketSize)
    EIGEN_DEBUG_VAR(MightVectorize)
    EIGEN_DEBUG_VAR(MayLinearVectorize)
    EIGEN_DEBUG_VAR(MaySliceVectorize)
    EIGEN_DEBUG_VAR(Traversal)
    EIGEN_DEBUG_VAR(UnrollingLimit)
    EIGEN_DEBUG_VAR(Unrolling)
    std::cerr << std::endl;
  }
#endif
};
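// Illustrative note (added comment, not part of the original source): for a small fixed-size
// expression such as Eigen::Vector4f reduced with a packet-enabled functor, the enums above
// typically evaluate to MightVectorize=1 and MayLinearVectorize=1, so Traversal becomes
// LinearVectorizedTraversal, and the small compile-time Cost keeps Unrolling at
// CompleteUnrolling. For a dynamic-size matrix, SizeAtCompileTime==Dynamic makes Cost equal to
// HugeCost, so Unrolling falls back to NoUnrolling. The exact outcome depends on the scalar
// type, the SIMD target and EIGEN_UNROLLING_LIMIT, so treat this as a sketch, not a guarantee.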
/***************************************************************************
* Part 2 : unrollers
***************************************************************************/

/*** no vectorization ***/

template<typename Func, typename Derived, int Start, int Length>
struct redux_novec_unroller
{
  enum {
    HalfLength = Length/2
  };

  typedef typename Derived::Scalar Scalar;

  EIGEN_DEVICE_FUNC
  static EIGEN_STRONG_INLINE Scalar run(const Derived &mat, const Func& func)
  {
    return func(redux_novec_unroller<Func, Derived, Start, HalfLength>::run(mat,func),
                redux_novec_unroller<Func, Derived, Start+HalfLength, Length-HalfLength>::run(mat,func));
  }
};

template<typename Func, typename Derived, int Start>
struct redux_novec_unroller<Func, Derived, Start, 1>
{
  enum {
    outer = Start / Derived::InnerSizeAtCompileTime,
    inner = Start % Derived::InnerSizeAtCompileTime
  };

  typedef typename Derived::Scalar Scalar;

  EIGEN_DEVICE_FUNC
  static EIGEN_STRONG_INLINE Scalar run(const Derived &mat, const Func&)
  {
    return mat.coeffByOuterInner(outer, inner);
  }
};
// This specialization is dead code and will never be called: for length 0, run() is never
// invoked at all. It exists only to prevent false warnings about failed inlining.
template<typename Func, typename Derived, int Start>
struct redux_novec_unroller<Func, Derived, Start, 0>
{
  typedef typename Derived::Scalar Scalar;
  EIGEN_DEVICE_FUNC
  static EIGEN_STRONG_INLINE Scalar run(const Derived&, const Func&) { return Scalar(); }
};
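// Illustrative expansion (added comment, not part of the original source): for Start==0 and
// Length==4 the recursion above unrolls at compile time into a balanced binary tree,
//   func( func(c(0), c(1)), func(c(2), c(3)) )
// where c(k) denotes the k-th coefficient accessed through coeffByOuterInner. A balanced tree
// keeps the dependency chain of the reduction short compared to a left-to-right fold.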
/*** vectorization ***/

template<typename Func, typename Derived, int Start, int Length>
struct redux_vec_unroller
{
  enum {
    PacketSize = redux_traits<Func, Derived>::PacketSize,
    HalfLength = Length/2
  };

  typedef typename Derived::Scalar Scalar;
  typedef typename redux_traits<Func, Derived>::PacketType PacketScalar;

  static EIGEN_STRONG_INLINE PacketScalar run(const Derived &mat, const Func& func)
  {
    return func.packetOp(
            redux_vec_unroller<Func, Derived, Start, HalfLength>::run(mat,func),
            redux_vec_unroller<Func, Derived, Start+HalfLength, Length-HalfLength>::run(mat,func) );
  }
};

template<typename Func, typename Derived, int Start>
struct redux_vec_unroller<Func, Derived, Start, 1>
{
  enum {
    index = Start * redux_traits<Func, Derived>::PacketSize,
    outer = index / int(Derived::InnerSizeAtCompileTime),
    inner = index % int(Derived::InnerSizeAtCompileTime),
    alignment = Derived::Alignment
  };

  typedef typename Derived::Scalar Scalar;
  typedef typename redux_traits<Func, Derived>::PacketType PacketScalar;

  static EIGEN_STRONG_INLINE PacketScalar run(const Derived &mat, const Func&)
  {
    return mat.template packetByOuterInner<alignment,PacketScalar>(outer, inner);
  }
};
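// Illustrative expansion (added comment, not part of the original source): with PacketSize==4,
// Start==0 and Length==2 the vectorized unroller evaluates roughly
//   func.packetOp( packet at linear index 0, packet at linear index 4 )
// i.e. one packet per unrolled step; the caller (Part 3) then collapses the resulting packet
// into a scalar with func.predux(...).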
/***************************************************************************
* Part 3 : implementation of all cases
***************************************************************************/

template<typename Func, typename Derived,
         int Traversal = redux_traits<Func, Derived>::Traversal,
         int Unrolling = redux_traits<Func, Derived>::Unrolling
>
struct redux_impl;

template<typename Func, typename Derived>
struct redux_impl<Func, Derived, DefaultTraversal, NoUnrolling>
{
  typedef typename Derived::Scalar Scalar;
  EIGEN_DEVICE_FUNC
  static EIGEN_STRONG_INLINE Scalar run(const Derived &mat, const Func& func)
  {
    eigen_assert(mat.rows()>0 && mat.cols()>0 && "you are using an empty matrix");
    Scalar res;
    res = mat.coeffByOuterInner(0, 0);
    for(Index i = 1; i < mat.innerSize(); ++i)
      res = func(res, mat.coeffByOuterInner(0, i));
    for(Index i = 1; i < mat.outerSize(); ++i)
      for(Index j = 0; j < mat.innerSize(); ++j)
        res = func(res, mat.coeffByOuterInner(i, j));
    return res;
  }
};

template<typename Func, typename Derived>
struct redux_impl<Func,Derived, DefaultTraversal, CompleteUnrolling>
  : public redux_novec_unroller<Func,Derived, 0, Derived::SizeAtCompileTime>
{};
template<typename Func, typename Derived>
struct redux_impl<Func, Derived, LinearVectorizedTraversal, NoUnrolling>
{
  typedef typename Derived::Scalar Scalar;
  typedef typename redux_traits<Func, Derived>::PacketType PacketScalar;

  static Scalar run(const Derived &mat, const Func& func)
  {
    const Index size = mat.size();
    const Index packetSize = redux_traits<Func, Derived>::PacketSize;
    const int packetAlignment = unpacket_traits<PacketScalar>::alignment;
    enum {
      alignment0 = (bool(Derived::Flags & DirectAccessBit) && bool(packet_traits<Scalar>::AlignedOnScalar)) ? int(packetAlignment) : int(Unaligned),
      alignment = EIGEN_PLAIN_ENUM_MAX(alignment0, Derived::Alignment)
    };
    const Index alignedStart = internal::first_default_aligned(mat.nestedExpression());
    const Index alignedSize2 = ((size-alignedStart)/(2*packetSize))*(2*packetSize);
    const Index alignedSize  = ((size-alignedStart)/(packetSize))*(packetSize);
    const Index alignedEnd2 = alignedStart + alignedSize2;
    const Index alignedEnd  = alignedStart + alignedSize;
    Scalar res;
    if(alignedSize)
    {
      PacketScalar packet_res0 = mat.template packet<alignment,PacketScalar>(alignedStart);
      if(alignedSize>packetSize) // we have at least two packets to partly unroll the loop
      {
        PacketScalar packet_res1 = mat.template packet<alignment,PacketScalar>(alignedStart+packetSize);
        for(Index index = alignedStart + 2*packetSize; index < alignedEnd2; index += 2*packetSize)
        {
          packet_res0 = func.packetOp(packet_res0, mat.template packet<alignment,PacketScalar>(index));
          packet_res1 = func.packetOp(packet_res1, mat.template packet<alignment,PacketScalar>(index+packetSize));
        }
        packet_res0 = func.packetOp(packet_res0,packet_res1);
        if(alignedEnd>alignedEnd2)
          packet_res0 = func.packetOp(packet_res0, mat.template packet<alignment,PacketScalar>(alignedEnd2));
      }
      res = func.predux(packet_res0);

      for(Index index = 0; index < alignedStart; ++index)
        res = func(res,mat.coeff(index));

      for(Index index = alignedEnd; index < size; ++index)
        res = func(res,mat.coeff(index));
    }
    else // too small to vectorize anything.
         // Since this is dynamic-size, it is inefficient for such small sizes anyway; do not try to optimize.
    {
      res = mat.coeff(0);
      for(Index index = 1; index < size; ++index)
        res = func(res,mat.coeff(index));
    }

    return res;
  }
};
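// Worked example (added comment, illustrative numbers): suppose size==1007, packetSize==4 and
// first_default_aligned(...) returns alignedStart==2. Then
//   alignedSize2 = ((1007-2)/8)*8 = 1000,  alignedEnd2 = 1002,
//   alignedSize  = ((1007-2)/4)*4 = 1004,  alignedEnd  = 1006.
// Coefficients [2,1002) are reduced with the two packet accumulators, the single leftover
// packet covers [1002,1006), and the scalar loops handle the head [0,2) and the tail
// [1006,1007).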
// NOTE: for SliceVectorizedTraversal we simply bypass unrolling
template<typename Func, typename Derived, int Unrolling>
struct redux_impl<Func, Derived, SliceVectorizedTraversal, Unrolling>
{
  typedef typename Derived::Scalar Scalar;
  typedef typename redux_traits<Func, Derived>::PacketType PacketType;

  EIGEN_DEVICE_FUNC static Scalar run(const Derived &mat, const Func& func)
  {
    eigen_assert(mat.rows()>0 && mat.cols()>0 && "you are using an empty matrix");
    const Index innerSize = mat.innerSize();
    const Index outerSize = mat.outerSize();
    enum {
      packetSize = redux_traits<Func, Derived>::PacketSize
    };
    const Index packetedInnerSize = ((innerSize)/packetSize)*packetSize;
    Scalar res;
    if(packetedInnerSize)
    {
      PacketType packet_res = mat.template packet<Unaligned,PacketType>(0,0);
      for(Index j=0; j<outerSize; ++j)
        for(Index i=(j==0?packetSize:0); i<packetedInnerSize; i+=Index(packetSize))
          packet_res = func.packetOp(packet_res, mat.template packetByOuterInner<Unaligned,PacketType>(j,i));

      res = func.predux(packet_res);
      for(Index j=0; j<outerSize; ++j)
        for(Index i=packetedInnerSize; i<innerSize; ++i)
          res = func(res, mat.coeffByOuterInner(j,i));
    }
    else // too small to vectorize anything.
         // Since this is dynamic-size, it is inefficient for such small sizes anyway; do not try to optimize.
    {
      res = redux_impl<Func, Derived, DefaultTraversal, NoUnrolling>::run(mat, func);
    }

    return res;
  }
};
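// Worked example (added comment, illustrative numbers): with innerSize==10 and packetSize==4,
// packetedInnerSize==8, so within every outer slice (a column of a column-major expression)
// the packets at inner indices 0 and 4 are reduced vectorially, with the first packet of the
// first slice seeding packet_res; the remaining coefficients at inner indices 8 and 9 of every
// slice go through the scalar clean-up loop.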
template<typename Func, typename Derived>
struct redux_impl<Func, Derived, LinearVectorizedTraversal, CompleteUnrolling>
{
  typedef typename Derived::Scalar Scalar;
  typedef typename redux_traits<Func, Derived>::PacketType PacketScalar;
  enum {
    PacketSize = redux_traits<Func, Derived>::PacketSize,
    Size = Derived::SizeAtCompileTime,
    VectorizedSize = (Size / PacketSize) * PacketSize
  };
  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Derived &mat, const Func& func)
  {
    eigen_assert(mat.rows()>0 && mat.cols()>0 && "you are using an empty matrix");
    if (VectorizedSize > 0) {
      Scalar res = func.predux(redux_vec_unroller<Func, Derived, 0, Size / PacketSize>::run(mat,func));
      if (VectorizedSize != Size)
        res = func(res,redux_novec_unroller<Func, Derived, VectorizedSize, Size-VectorizedSize>::run(mat,func));
      return res;
    }
    else {
      return redux_novec_unroller<Func, Derived, 0, Size>::run(mat,func);
    }
  }
};
// evaluator adaptor
template<typename _XprType>
class redux_evaluator
{
public:
  typedef _XprType XprType;
  EIGEN_DEVICE_FUNC explicit redux_evaluator(const XprType &xpr) : m_evaluator(xpr), m_xpr(xpr) {}

  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename XprType::PacketScalar PacketScalar;
  typedef typename XprType::PacketReturnType PacketReturnType;

  enum {
    MaxRowsAtCompileTime = XprType::MaxRowsAtCompileTime,
    MaxColsAtCompileTime = XprType::MaxColsAtCompileTime,
    // TODO we should not remove DirectAccessBit and rather find an elegant way to query the alignment offset at runtime from the evaluator
    Flags = evaluator<XprType>::Flags & ~DirectAccessBit,
    IsRowMajor = XprType::IsRowMajor,
    SizeAtCompileTime = XprType::SizeAtCompileTime,
    InnerSizeAtCompileTime = XprType::InnerSizeAtCompileTime,
    CoeffReadCost = evaluator<XprType>::CoeffReadCost,
    Alignment = evaluator<XprType>::Alignment
  };

  EIGEN_DEVICE_FUNC Index rows() const { return m_xpr.rows(); }
  EIGEN_DEVICE_FUNC Index cols() const { return m_xpr.cols(); }
  EIGEN_DEVICE_FUNC Index size() const { return m_xpr.size(); }
  EIGEN_DEVICE_FUNC Index innerSize() const { return m_xpr.innerSize(); }
  EIGEN_DEVICE_FUNC Index outerSize() const { return m_xpr.outerSize(); }

  EIGEN_DEVICE_FUNC
  CoeffReturnType coeff(Index row, Index col) const
  { return m_evaluator.coeff(row, col); }

  EIGEN_DEVICE_FUNC
  CoeffReturnType coeff(Index index) const
  { return m_evaluator.coeff(index); }

  template<int LoadMode, typename PacketType>
  PacketType packet(Index row, Index col) const
  { return m_evaluator.template packet<LoadMode,PacketType>(row, col); }

  template<int LoadMode, typename PacketType>
  PacketType packet(Index index) const
  { return m_evaluator.template packet<LoadMode,PacketType>(index); }

  EIGEN_DEVICE_FUNC
  CoeffReturnType coeffByOuterInner(Index outer, Index inner) const
  { return m_evaluator.coeff(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer); }

  template<int LoadMode, typename PacketType>
  PacketType packetByOuterInner(Index outer, Index inner) const
  { return m_evaluator.template packet<LoadMode,PacketType>(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer); }

  const XprType & nestedExpression() const { return m_xpr; }

protected:
  internal::evaluator<XprType> m_evaluator;
  const XprType &m_xpr;
};
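// Illustrative note (added comment, not part of the original source): the outer/inner mapping
// above means that for a column-major expression (IsRowMajor==0) coeffByOuterInner(outer,inner)
// reads the coefficient at row==inner, column==outer, e.g. coeffByOuterInner(2,1) is the entry
// at row 1 of column 2; for a row-major expression the roles are swapped. This is what lets the
// redux_impl loops in Part 3 traverse any storage order with the same code.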
} // end namespace internal

/***************************************************************************
* Part 4 : public API
***************************************************************************/
/** \returns the result of a full redux operation on the whole matrix or vector using \a func
  *
  * The template parameter \a Func is the type of the functor \a func, which must be
  * an associative binary operator. Both the current C++98 and the C++11 functor styles are handled.
  *
  * \sa DenseBase::sum(), DenseBase::minCoeff(), DenseBase::maxCoeff(), MatrixBase::colwise(), MatrixBase::rowwise()
  */
template<typename Derived>
template<typename Func>
EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
DenseBase<Derived>::redux(const Func& func) const
{
  eigen_assert(this->rows()>0 && this->cols()>0 && "you are using an empty matrix");

  typedef typename internal::redux_evaluator<Derived> ThisEvaluator;
  ThisEvaluator thisEval(derived());

  return internal::redux_impl<Func, ThisEvaluator>::run(thisEval, func);
}
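// Hedged usage sketch (added example, not part of the original source). It assumes <Eigen/Dense>
// is included and relies on the documented fact above that a C++11 functor (here a lambda) is
// accepted; the variable names are illustrative only.
//
//   #include <Eigen/Dense>
//   #include <algorithm>
//   #include <cmath>
//
//   Eigen::Vector4f v(1.f, -2.f, 3.f, -4.f);
//   // custom associative reduction: largest absolute value
//   float m = v.redux([](float a, float b) { return std::max(std::abs(a), std::abs(b)); });
//   // m == 4.f; a lambda returning a + b would reproduce v.sum()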
/** \returns the minimum of all coefficients of \c *this.
  * \warning the result is undefined if \c *this contains NaN.
  */
template<typename Derived>
EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
DenseBase<Derived>::minCoeff() const
{
  return derived().redux(Eigen::internal::scalar_min_op<Scalar,Scalar>());
}

/** \returns the maximum of all coefficients of \c *this.
  * \warning the result is undefined if \c *this contains NaN.
  */
template<typename Derived>
EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
DenseBase<Derived>::maxCoeff() const
{
  return derived().redux(Eigen::internal::scalar_max_op<Scalar,Scalar>());
}
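// Hedged usage sketch (added example, not part of the original source); assumes <Eigen/Dense>:
//
//   Eigen::Vector3f v(3.f, 1.f, 2.f);
//   float lo = v.minCoeff();   // 1.f
//   float hi = v.maxCoeff();   // 3.f
//
// As the warnings above state, the result is undefined if the expression contains NaN.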
/** \returns the sum of all coefficients of \c *this
  *
  * If \c *this is empty, then the value 0 is returned.
  *
  * \sa trace(), prod(), mean()
  */
template<typename Derived>
EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
DenseBase<Derived>::sum() const
{
  if(SizeAtCompileTime==0 || (SizeAtCompileTime==Dynamic && size()==0))
    return Scalar(0);
  return derived().redux(Eigen::internal::scalar_sum_op<Scalar,Scalar>());
}
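// Hedged usage sketch (added example, not part of the original source); assumes <Eigen/Dense>:
//
//   Eigen::Vector3d v(1.0, 2.0, 3.0);
//   double s = v.sum();                 // 6.0
//   Eigen::MatrixXd empty(0, 3);
//   double z = empty.sum();             // 0.0, per the empty-matrix convention checked above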
/** \returns the mean of all coefficients of *this
  *
  * \sa trace(), prod(), sum()
  */
template<typename Derived>
EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
DenseBase<Derived>::mean() const
{
#ifdef __INTEL_COMPILER
  #pragma warning push
  #pragma warning ( disable : 2259 )
#endif
  return Scalar(derived().redux(Eigen::internal::scalar_sum_op<Scalar,Scalar>())) / Scalar(this->size());
#ifdef __INTEL_COMPILER
  #pragma warning pop
#endif
}
/** \returns the product of all coefficients of *this
  *
  * Example: \include MatrixBase_prod.cpp
  * Output: \verbinclude MatrixBase_prod.out
  *
  * \sa sum(), mean(), trace()
  */
template<typename Derived>
EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
DenseBase<Derived>::prod() const
{
  if(SizeAtCompileTime==0 || (SizeAtCompileTime==Dynamic && size()==0))
    return Scalar(1);
  return derived().redux(Eigen::internal::scalar_product_op<Scalar>());
}
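// Hedged usage sketch (added example, not part of the original source); assumes <Eigen/Dense>:
//
//   Eigen::Vector3d v(2.0, 3.0, 4.0);
//   double p = v.prod();                // 24.0
//   Eigen::MatrixXd empty(3, 0);
//   double one = empty.prod();          // 1.0, the neutral element, per the check above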
/** \returns the trace of \c *this, i.e. the sum of the coefficients on the main diagonal.
  *
  * \c *this can be any matrix, not necessarily square.
  *
  * \sa diagonal(), sum()
  */
template<typename Derived>
EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar
MatrixBase<Derived>::trace() const
{
  return derived().diagonal().sum();
}
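// Hedged usage sketch (added example, not part of the original source); assumes <Eigen/Dense>:
//
//   Eigen::Matrix2d a;
//   a << 1, 2,
//        3, 4;
//   double t = a.trace();               // 1 + 4 == 5
//
// For a non-square matrix, diagonal() has min(rows, cols) entries, and trace() sums those.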
} // end namespace Eigen

#endif // EIGEN_REDUX_H