// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSEASSIGN_H
#define EIGEN_SPARSEASSIGN_H

namespace Eigen {
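
// The SparseMatrixBase::operator= overloads below route sparse assignments to the
// evaluator-based internal::Assignment machinery defined further down in the internal
// namespace (except for the ReturnByValue overload, which still calls evalTo directly,
// see the TODO).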
template<typename Derived>
template<typename OtherDerived>
Derived& SparseMatrixBase<Derived>::operator=(const EigenBase<OtherDerived> &other)
{
  internal::call_assignment_no_alias(derived(), other.derived());
  return derived();
}

template<typename Derived>
template<typename OtherDerived>
Derived& SparseMatrixBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)
{
  // TODO use the evaluator mechanism
  other.evalTo(derived());
  return derived();
}
template<typename Derived>
template<typename OtherDerived>
inline Derived& SparseMatrixBase<Derived>::operator=(const SparseMatrixBase<OtherDerived>& other)
{
  // by default, sparse evaluations do not alias, so we can safely bypass the generic call_assignment routine
  internal::Assignment<Derived,OtherDerived,internal::assign_op<Scalar,typename OtherDerived::Scalar> >
          ::run(derived(), other.derived(), internal::assign_op<Scalar,typename OtherDerived::Scalar>());
  return derived();
}
template<typename Derived>
inline Derived& SparseMatrixBase<Derived>::operator=(const Derived& other)
{
  internal::call_assignment_no_alias(derived(), other.derived());
  return derived();
}
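
// Illustrative usage of the overloads above (a sketch, not part of the original header):
//   SparseMatrix<double> A(n,n), B;
//   B = 2.0 * A;   // a sparse expression assigned to a sparse destination,
//   B = A + A;     // eventually evaluated through the Sparse2Sparse path below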

namespace internal {

template<>
struct storage_kind_to_evaluator_kind<Sparse> {
  typedef IteratorBased Kind;
};

template<>
struct storage_kind_to_shape<Sparse> {
  typedef SparseShape Shape;
};
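
// These two traits tell the generic assignment framework that expressions with Sparse
// storage are traversed with inner iterators and expose SparseShape to AssignmentKind.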

struct Sparse2Sparse {};
struct Sparse2Dense  {};

template<> struct AssignmentKind<SparseShape, SparseShape>           { typedef Sparse2Sparse Kind; };
template<> struct AssignmentKind<SparseShape, SparseTriangularShape> { typedef Sparse2Sparse Kind; };
template<> struct AssignmentKind<DenseShape,  SparseShape>           { typedef Sparse2Dense  Kind; };
template<> struct AssignmentKind<DenseShape,  SparseTriangularShape> { typedef Sparse2Dense  Kind; };
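
// Copies a sparse expression into a sparse destination. When the storage orders match and
// the source is marked as an r-value, the destination is filled directly; otherwise the
// result is first built in a temporary of the destination type and then moved into dst.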
template<typename DstXprType, typename SrcXprType>
void assign_sparse_to_sparse(DstXprType &dst, const SrcXprType &src)
{
  typedef typename DstXprType::Scalar Scalar;
  typedef internal::evaluator<DstXprType> DstEvaluatorType;
  typedef internal::evaluator<SrcXprType> SrcEvaluatorType;

  SrcEvaluatorType srcEvaluator(src);

  const bool transpose = (DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit);
  const Index outerEvaluationSize = (SrcEvaluatorType::Flags&RowMajorBit) ? src.rows() : src.cols();
  if ((!transpose) && src.isRValue())
  {
    // eval without temporary
    dst.resize(src.rows(), src.cols());
    dst.setZero();
    dst.reserve((std::max)(src.rows(),src.cols())*2);
    for (Index j=0; j<outerEvaluationSize; ++j)
    {
      dst.startVec(j);
      for (typename SrcEvaluatorType::InnerIterator it(srcEvaluator, j); it; ++it)
      {
        Scalar v = it.value();
        dst.insertBackByOuterInner(j,it.index()) = v;
      }
    }
    dst.finalize();
  }
  else
  {
    // eval through a temporary
    eigen_assert(( ((internal::traits<DstXprType>::SupportedAccessPatterns & OuterRandomAccessPattern)==OuterRandomAccessPattern) ||
                  (!((DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit)))) &&
                  "the transpose operation is supposed to be handled in SparseMatrix::operator=");

    enum { Flip = (DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit) };

    DstXprType temp(src.rows(), src.cols());
    temp.reserve((std::max)(src.rows(),src.cols())*2);
    for (Index j=0; j<outerEvaluationSize; ++j)
    {
      temp.startVec(j);
      for (typename SrcEvaluatorType::InnerIterator it(srcEvaluator, j); it; ++it)
      {
        Scalar v = it.value();
        temp.insertBackByOuterInner(Flip?it.index():j,Flip?j:it.index()) = v;
      }
    }
    temp.finalize();

    dst = temp.markAsRValue();
  }
}

// Generic Sparse to Sparse assignment
template< typename DstXprType, typename SrcXprType, typename Functor>
struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Sparse>
{
  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
  {
    assign_sparse_to_sparse(dst.derived(), src.derived());
  }
};

// Generic Sparse to Dense assignment
template< typename DstXprType, typename SrcXprType, typename Functor>
struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Dense>
{
  static void run(DstXprType &dst, const SrcXprType &src, const Functor &func)
  {
    if(internal::is_same<Functor,internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> >::value)
      dst.setZero();

    internal::evaluator<SrcXprType> srcEval(src);
    resize_if_allowed(dst, src, func);
    internal::evaluator<DstXprType> dstEval(dst);

    const Index outerEvaluationSize = (internal::evaluator<SrcXprType>::Flags&RowMajorBit) ? src.rows() : src.cols();
    for (Index j=0; j<outerEvaluationSize; ++j)
      for (typename internal::evaluator<SrcXprType>::InnerIterator i(srcEval,j); i; ++i)
        func.assignCoeff(dstEval.coeffRef(i.row(),i.col()), i.value());
  }
};
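
// For instance (illustrative note, not from the original header), "denseMat += sparseMat"
// reaches the loop above with internal::add_assign_op: the destination is not cleared
// first, and only the nonzero coefficients of the sparse source are updated.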

// Specialization for "dst = dec.solve(rhs)"
// NOTE we need to specialize it for Sparse2Sparse to avoid an ambiguous specialization error
template<typename DstXprType, typename DecType, typename RhsType, typename Scalar>
struct Assignment<DstXprType, Solve<DecType,RhsType>, internal::assign_op<Scalar,Scalar>, Sparse2Sparse>
{
  typedef Solve<DecType,RhsType> SrcXprType;
  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
  {
    Index dstRows = src.rows();
    Index dstCols = src.cols();
    if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
      dst.resize(dstRows, dstCols);

    src.dec()._solve_impl(src.rhs(), dst);
  }
};
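
// This specialization resizes the sparse destination when needed and lets the decomposition
// write the result directly through _solve_impl, instead of going through assign_sparse_to_sparse.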

struct Diagonal2Sparse {};

template<> struct AssignmentKind<SparseShape,DiagonalShape> { typedef Diagonal2Sparse Kind; };

template< typename DstXprType, typename SrcXprType, typename Functor>
struct Assignment<DstXprType, SrcXprType, Functor, Diagonal2Sparse>
{
  typedef typename DstXprType::StorageIndex StorageIndex;
  typedef typename DstXprType::Scalar Scalar;
  typedef Array<StorageIndex,Dynamic,1> ArrayXI;
  typedef Array<Scalar,Dynamic,1> ArrayXS;

  template<int Options>
  static void run(SparseMatrix<Scalar,Options,StorageIndex> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
  {
    Index dstRows = src.rows();
    Index dstCols = src.cols();
    if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
      dst.resize(dstRows, dstCols);

    Index size = src.diagonal().size();
    dst.makeCompressed();
    dst.resizeNonZeros(size);
    Map<ArrayXI>(dst.innerIndexPtr(), size).setLinSpaced(0,StorageIndex(size)-1);
    Map<ArrayXI>(dst.outerIndexPtr(), size+1).setLinSpaced(0,StorageIndex(size));
    Map<ArrayXS>(dst.valuePtr(), size) = src.diagonal();
  }

  template<typename DstDerived>
  static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
  {
    dst.diagonal() = src.diagonal();
  }

  static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
  { dst.diagonal() += src.diagonal(); }

  static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
  { dst.diagonal() -= src.diagonal(); }
};
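
// Illustrative behaviour (sketch, not part of the original header): an assignment such as
// "spMat = vec.asDiagonal()" takes the SparseMatrix overload above and builds a compressed
// storage with exactly one entry per diagonal coefficient, whereas "spMat += vec.asDiagonal()"
// and "spMat -= vec.asDiagonal()" only update the existing diagonal in place via dst.diagonal().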

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_SPARSEASSIGN_H