Eigen  3.2.93
SparseMatrix.h
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
5 //
6 // This Source Code Form is subject to the terms of the Mozilla
7 // Public License v. 2.0. If a copy of the MPL was not distributed
8 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9 
10 #ifndef EIGEN_SPARSEMATRIX_H
11 #define EIGEN_SPARSEMATRIX_H
12 
13 namespace Eigen {
14 
41 namespace internal {
42 template<typename _Scalar, int _Options, typename _Index>
43 struct traits<SparseMatrix<_Scalar, _Options, _Index> >
44 {
45  typedef _Scalar Scalar;
46  typedef _Index StorageIndex;
47  typedef Sparse StorageKind;
48  typedef MatrixXpr XprKind;
49  enum {
50  RowsAtCompileTime = Dynamic,
51  ColsAtCompileTime = Dynamic,
52  MaxRowsAtCompileTime = Dynamic,
53  MaxColsAtCompileTime = Dynamic,
54  Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit,
55  SupportedAccessPatterns = InnerRandomAccessPattern
56  };
57 };
58 
59 template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
60 struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
61 {
62  typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType;
63  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
64  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
65 
66  typedef _Scalar Scalar;
67  typedef Dense StorageKind;
68  typedef _Index StorageIndex;
69  typedef MatrixXpr XprKind;
70 
71  enum {
72  RowsAtCompileTime = Dynamic,
73  ColsAtCompileTime = 1,
74  MaxRowsAtCompileTime = Dynamic,
75  MaxColsAtCompileTime = 1,
76  Flags = LvalueBit
77  };
78 };
79 
80 template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
81 struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
82  : public traits<Diagonal<SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
83 {
84  enum {
85  Flags = 0
86  };
87 };
88 
89 } // end namespace internal
90 
91 template<typename _Scalar, int _Options, typename _Index>
92 class SparseMatrix
93  : public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _Index> >
94 {
95  typedef SparseCompressedBase<SparseMatrix> Base;
96  using Base::convert_index;
97  public:
98  using Base::isCompressed;
99  using Base::nonZeros;
100  EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
101  using Base::operator+=;
102  using Base::operator-=;
103 
104  typedef MappedSparseMatrix<Scalar,Flags> Map;
105  typedef Diagonal<SparseMatrix> DiagonalReturnType;
106  typedef Diagonal<const SparseMatrix> ConstDiagonalReturnType;
107  typedef typename Base::InnerIterator InnerIterator;
108  typedef typename Base::ReverseInnerIterator ReverseInnerIterator;
109 
110 
111  using Base::IsRowMajor;
112  typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
113  enum {
114  Options = _Options
115  };
116 
117  typedef typename Base::IndexVector IndexVector;
118  typedef typename Base::ScalarVector ScalarVector;
119  protected:
121 
122  Index m_outerSize;
123  Index m_innerSize;
124  StorageIndex* m_outerIndex;
125  StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed
126  Storage m_data;
127 
128  public:
129 
131  inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
133  inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
134 
136  inline Index innerSize() const { return m_innerSize; }
138  inline Index outerSize() const { return m_outerSize; }
139 
143  inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
147  inline Scalar* valuePtr() { return m_data.valuePtr(); }
148 
152  inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
156  inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }
157 
161  inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
165  inline StorageIndex* outerIndexPtr() { return m_outerIndex; }
166 
170  inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
174  inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; }
175 
177  inline Storage& data() { return m_data; }
179  inline const Storage& data() const { return m_data; }
180 
183  inline Scalar coeff(Index row, Index col) const
184  {
185  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
186 
187  const Index outer = IsRowMajor ? row : col;
188  const Index inner = IsRowMajor ? col : row;
189  Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
190  return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
191  }
192 
201  inline Scalar& coeffRef(Index row, Index col)
202  {
203  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
204 
205  const Index outer = IsRowMajor ? row : col;
206  const Index inner = IsRowMajor ? col : row;
207 
208  Index start = m_outerIndex[outer];
209  Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
210  eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
211  if(end<=start)
212  return insert(row,col);
213  const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner));
214  if((p<end) && (m_data.index(p)==inner))
215  return m_data.value(p);
216  else
217  return insert(row,col);
218  }
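A minimal usage sketch of the two accessors above (not part of the header; the size and values are illustrative): coeff() is a read-only binary search that returns 0 for missing entries, whereas coeffRef() inserts a zero entry when the coefficient does not exist yet.

    Eigen::SparseMatrix<double> A(5,5);  // default column-major storage
    A.coeffRef(1,2) = 3.0;               // creates the (1,2) entry
    double a = A.coeff(1,2);             // 3.0, no insertion
    double b = A.coeff(0,0);             // 0.0, and no entry is created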
219 
235  Scalar& insert(Index row, Index col);
236 
237  public:
238 
246  inline void setZero()
247  {
248  m_data.clear();
249  memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
250  if(m_innerNonZeros)
251  memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
252  }
253 
257  inline void reserve(Index reserveSize)
258  {
259  eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
260  m_data.reserve(reserveSize);
261  }
262 
263  #ifdef EIGEN_PARSED_BY_DOXYGEN
264 
276  template<class SizesType>
277  inline void reserve(const SizesType& reserveSizes);
278  #else
279  template<class SizesType>
280  inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
281  #if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename
282  typename
283  #endif
284  SizesType::value_type())
285  {
286  EIGEN_UNUSED_VARIABLE(enableif);
287  reserveInnerVectors(reserveSizes);
288  }
289  #endif // EIGEN_PARSED_BY_DOXYGEN
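A sketch of the per-inner-vector reservation declared above, assuming the default column-major layout; the estimate of 6 non-zeros per column is illustrative:

    Eigen::SparseMatrix<double> A(1000,1000);
    A.reserve(Eigen::VectorXi::Constant(A.cols(), 6)); // room for ~6 entries per column
    // ... A.insert(i,j) = v_ij; for each known coefficient ...
    A.makeCompressed();                                // back to the compact CSC layout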
290  protected:
291  template<class SizesType>
292  inline void reserveInnerVectors(const SizesType& reserveSizes)
293  {
294  if(isCompressed())
295  {
296  Index totalReserveSize = 0;
297  // turn the matrix into non-compressed mode
298  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
299  if (!m_innerNonZeros) internal::throw_std_bad_alloc();
300 
301  // temporarily use m_innerNonZeros to hold the new starting points.
302  StorageIndex* newOuterIndex = m_innerNonZeros;
303 
304  StorageIndex count = 0;
305  for(Index j=0; j<m_outerSize; ++j)
306  {
307  newOuterIndex[j] = count;
308  count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
309  totalReserveSize += reserveSizes[j];
310  }
311  m_data.reserve(totalReserveSize);
312  StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
313  for(Index j=m_outerSize-1; j>=0; --j)
314  {
315  StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
316  for(Index i=innerNNZ-1; i>=0; --i)
317  {
318  m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
319  m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
320  }
321  previousOuterIndex = m_outerIndex[j];
322  m_outerIndex[j] = newOuterIndex[j];
323  m_innerNonZeros[j] = innerNNZ;
324  }
325  m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
326 
327  m_data.resize(m_outerIndex[m_outerSize]);
328  }
329  else
330  {
331  StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
332  if (!newOuterIndex) internal::throw_std_bad_alloc();
333 
334  StorageIndex count = 0;
335  for(Index j=0; j<m_outerSize; ++j)
336  {
337  newOuterIndex[j] = count;
338  StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
339  StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
340  count += toReserve + m_innerNonZeros[j];
341  }
342  newOuterIndex[m_outerSize] = count;
343 
344  m_data.resize(count);
345  for(Index j=m_outerSize-1; j>=0; --j)
346  {
347  Index offset = newOuterIndex[j] - m_outerIndex[j];
348  if(offset>0)
349  {
350  StorageIndex innerNNZ = m_innerNonZeros[j];
351  for(Index i=innerNNZ-1; i>=0; --i)
352  {
353  m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
354  m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
355  }
356  }
357  }
358 
359  std::swap(m_outerIndex, newOuterIndex);
360  std::free(newOuterIndex);
361  }
362 
363  }
364  public:
365 
366  //--- low level purely coherent filling ---
367 
378  inline Scalar& insertBack(Index row, Index col)
379  {
380  return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
381  }
382 
385  inline Scalar& insertBackByOuterInner(Index outer, Index inner)
386  {
387  eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
388  eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
389  Index p = m_outerIndex[outer+1];
390  ++m_outerIndex[outer+1];
391  m_data.append(Scalar(0), inner);
392  return m_data.value(p);
393  }
394 
397  inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
398  {
399  Index p = m_outerIndex[outer+1];
400  ++m_outerIndex[outer+1];
401  m_data.append(Scalar(0), inner);
402  return m_data.value(p);
403  }
404 
407  inline void startVec(Index outer)
408  {
409  eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
410  eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
411  m_outerIndex[outer+1] = m_outerIndex[outer];
412  }
413 
417  inline void finalize()
418  {
419  if(isCompressed())
420  {
421  StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
422  Index i = m_outerSize;
423  // find the last filled column
424  while (i>=0 && m_outerIndex[i]==0)
425  --i;
426  ++i;
427  while (i<=m_outerSize)
428  {
429  m_outerIndex[i] = size;
430  ++i;
431  }
432  }
433  }
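The three helpers above implement a strictly ordered, low-level filling protocol: call startVec() once per outer vector, append entries with strictly increasing inner indices, then call finalize(). A minimal sketch (sizes and values are illustrative):

    Eigen::SparseMatrix<double> A(4,3);      // 4x3, column-major: outer = column
    A.reserve(6);
    for (int j = 0; j < A.outerSize(); ++j)
    {
      A.startVec(j);
      A.insertBack(j,   j) = 1.0;            // inner (row) indices strictly increasing
      A.insertBack(j+1, j) = 2.0;            //   within each outer vector
    }
    A.finalize();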
434 
435  //---
436 
437  template<typename InputIterators>
438  void setFromTriplets(const InputIterators& begin, const InputIterators& end);
439 
440  template<typename InputIterators,typename DupFunctor>
441  void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
442 
443  void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar,Scalar>()); }
444 
445  template<typename DupFunctor>
446  void collapseDuplicates(DupFunctor dup_func = DupFunctor());
447 
448  //---
449 
452  Scalar& insertByOuterInner(Index j, Index i)
453  {
454  return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
455  }
456 
459  void makeCompressed()
460  {
461  if(isCompressed())
462  return;
463 
464  eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0);
465 
466  Index oldStart = m_outerIndex[1];
467  m_outerIndex[1] = m_innerNonZeros[0];
468  for(Index j=1; j<m_outerSize; ++j)
469  {
470  Index nextOldStart = m_outerIndex[j+1];
471  Index offset = oldStart - m_outerIndex[j];
472  if(offset>0)
473  {
474  for(Index k=0; k<m_innerNonZeros[j]; ++k)
475  {
476  m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
477  m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
478  }
479  }
480  m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
481  oldStart = nextOldStart;
482  }
483  std::free(m_innerNonZeros);
484  m_innerNonZeros = 0;
485  m_data.resize(m_outerIndex[m_outerSize]);
486  m_data.squeeze();
487  }
488 
490  void uncompress()
491  {
492  if(m_innerNonZeros != 0)
493  return;
494  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
495  for (Index i = 0; i < m_outerSize; i++)
496  {
497  m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
498  }
499  }
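A short sketch of toggling between the two storage modes (illustrative values):

    Eigen::SparseMatrix<double> A(10,10);
    A.insert(2,3) = 1.0;    // random insertion switches A to uncompressed mode
    A.makeCompressed();     // compact CSC: isCompressed() is now true
    A.uncompress();         // recreates the per-vector counters, no data is moved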
500 
502  void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
503  {
504  prune(default_prunning_func(reference,epsilon));
505  }
506 
514  template<typename KeepFunc>
515  void prune(const KeepFunc& keep = KeepFunc())
516  {
517  // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
518  makeCompressed();
519 
520  StorageIndex k = 0;
521  for(Index j=0; j<m_outerSize; ++j)
522  {
523  Index previousStart = m_outerIndex[j];
524  m_outerIndex[j] = k;
525  Index end = m_outerIndex[j+1];
526  for(Index i=previousStart; i<end; ++i)
527  {
528  if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
529  {
530  m_data.value(k) = m_data.value(i);
531  m_data.index(k) = m_data.index(i);
532  ++k;
533  }
534  }
535  }
536  m_outerIndex[m_outerSize] = k;
537  m_data.resize(k,0);
538  }
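A sketch of both prune() overloads; the keep-functor receives (row, col, value) and returns true for entries to keep (the C++11 lambda is illustrative):

    Eigen::SparseMatrix<double> A(100,100);
    A.insert(3,3)  = 1.0;
    A.insert(10,2) = 1e-16;
    A.prune(1.0, 1e-12);    // drops 1e-16: much smaller than the reference value 1.0
    A.prune([](const Eigen::Index& row, const Eigen::Index& col, const double&)
            { return row >= col; });         // keep only the lower triangular part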
539 
548  void conservativeResize(Index rows, Index cols)
549  {
550  // No change
551  if (this->rows() == rows && this->cols() == cols) return;
552 
554  // If one dimension is zero, then there is nothing to be preserved
554  if(rows==0 || cols==0) return resize(rows,cols);
555 
556  Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
557  Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
558  StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);
559 
560  // Deals with inner non zeros
561  if (m_innerNonZeros)
562  {
563  // Resize m_innerNonZeros
564  StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
565  if (!newInnerNonZeros) internal::throw_std_bad_alloc();
566  m_innerNonZeros = newInnerNonZeros;
567 
568  for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
569  m_innerNonZeros[i] = 0;
570  }
571  else if (innerChange < 0)
572  {
573  // Inner size decreased: allocate a new m_innerNonZeros
574  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize+outerChange+1) * sizeof(StorageIndex)));
575  if (!m_innerNonZeros) internal::throw_std_bad_alloc();
576  for(Index i = 0; i < m_outerSize; i++)
577  m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
578  }
579 
580  // Change the m_innerNonZeros in case of a decrease of inner size
581  if (m_innerNonZeros && innerChange < 0)
582  {
583  for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
584  {
585  StorageIndex &n = m_innerNonZeros[i];
586  StorageIndex start = m_outerIndex[i];
587  while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
588  }
589  }
590 
591  m_innerSize = newInnerSize;
592 
593  // Re-allocate outer index structure if necessary
594  if (outerChange == 0)
595  return;
596 
597  StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
598  if (!newOuterIndex) internal::throw_std_bad_alloc();
599  m_outerIndex = newOuterIndex;
600  if (outerChange > 0)
601  {
602  StorageIndex last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
603  for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
604  m_outerIndex[i] = last;
605  }
606  m_outerSize += outerChange;
607  }
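A sketch contrasting conservativeResize() with the destructive resize() below (sizes are illustrative):

    Eigen::SparseMatrix<double> A(4,4);
    A.insert(1,1) = 2.0;
    A.conservativeResize(6,6);  // grows the matrix, the (1,1) entry is preserved
    A.resize(3,3);              // destructive: all entries are dropped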
608 
616  void resize(Index rows, Index cols)
617  {
618  const Index outerSize = IsRowMajor ? rows : cols;
619  m_innerSize = IsRowMajor ? cols : rows;
620  m_data.clear();
621  if (m_outerSize != outerSize || m_outerSize==0)
622  {
623  std::free(m_outerIndex);
624  m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
625  if (!m_outerIndex) internal::throw_std_bad_alloc();
626 
627  m_outerSize = outerSize;
628  }
629  if(m_innerNonZeros)
630  {
631  std::free(m_innerNonZeros);
632  m_innerNonZeros = 0;
633  }
634  memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
635  }
636 
639  void resizeNonZeros(Index size)
640  {
641  m_data.resize(size);
642  }
643 
645  const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); }
646 
651  DiagonalReturnType diagonal() { return DiagonalReturnType(*this); }
652 
654  inline SparseMatrix()
655  : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
656  {
657  check_template_parameters();
658  resize(0, 0);
659  }
660 
662  inline SparseMatrix(Index rows, Index cols)
663  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
664  {
665  check_template_parameters();
666  resize(rows, cols);
667  }
668 
670  template<typename OtherDerived>
671  SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
672  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
673  {
674  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
675  YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
676  check_template_parameters();
677  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
678  if (needToTranspose)
679  *this = other.derived();
680  else
681  {
682  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
683  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
684  #endif
685  internal::call_assignment_no_alias(*this, other.derived());
686  }
687  }
688 
690  template<typename OtherDerived, unsigned int UpLo>
691  SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
692  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
693  {
694  check_template_parameters();
695  Base::operator=(other);
696  }
697 
699  inline SparseMatrix(const SparseMatrix& other)
700  : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
701  {
702  check_template_parameters();
703  *this = other.derived();
704  }
705 
707  template<typename OtherDerived>
708  SparseMatrix(const ReturnByValue<OtherDerived>& other)
709  : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
710  {
711  check_template_parameters();
712  initAssignment(other);
713  other.evalTo(*this);
714  }
715 
717  template<typename OtherDerived>
718  explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
719  : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
720  {
721  check_template_parameters();
722  *this = other.derived();
723  }
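A sketch of the converting constructors declared above (the vector size is illustrative):

    Eigen::VectorXd d = Eigen::VectorXd::LinSpaced(5, 1.0, 5.0);
    Eigen::SparseMatrix<double> D(d.asDiagonal());      // from a DiagonalBase expression
    Eigen::SparseMatrix<double,Eigen::RowMajor> R(D);   // from another sparse expression,
                                                        // with an implicit change of storage order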
724 
727  inline void swap(SparseMatrix& other)
728  {
729  //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
730  std::swap(m_outerIndex, other.m_outerIndex);
731  std::swap(m_innerSize, other.m_innerSize);
732  std::swap(m_outerSize, other.m_outerSize);
733  std::swap(m_innerNonZeros, other.m_innerNonZeros);
734  m_data.swap(other.m_data);
735  }
736 
739  inline void setIdentity()
740  {
741  eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
742  this->m_data.resize(rows());
743  Eigen::Map<IndexVector>(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1));
744  Eigen::Map<ScalarVector>(this->m_data.valuePtr(), rows()).setOnes();
745  Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
746  std::free(m_innerNonZeros);
747  m_innerNonZeros = 0;
748  }
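A minimal sketch of setIdentity() (the matrix must be square; the size is illustrative):

    Eigen::SparseMatrix<double> I(100,100);
    I.setIdentity();   // 100 explicitly stored ones on the diagonal, compressed storage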
749  inline SparseMatrix& operator=(const SparseMatrix& other)
750  {
751  if (other.isRValue())
752  {
753  swap(other.const_cast_derived());
754  }
755  else if(this!=&other)
756  {
757  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
758  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
759  #endif
760  initAssignment(other);
761  if(other.isCompressed())
762  {
763  internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
764  m_data = other.m_data;
765  }
766  else
767  {
768  Base::operator=(other);
769  }
770  }
771  return *this;
772  }
773 
774 #ifndef EIGEN_PARSED_BY_DOXYGEN
775  template<typename OtherDerived>
776  inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
777  { return Base::operator=(other.derived()); }
778 #endif // EIGEN_PARSED_BY_DOXYGEN
779 
780  template<typename OtherDerived>
781  EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
782 
783  friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
784  {
785  EIGEN_DBG_SPARSE(
786  s << "Nonzero entries:\n";
787  if(m.isCompressed())
788  for (Index i=0; i<m.nonZeros(); ++i)
789  s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
790  else
791  for (Index i=0; i<m.outerSize(); ++i)
792  {
793  Index p = m.m_outerIndex[i];
794  Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
795  Index k=p;
796  for (; k<pe; ++k)
797  s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
798  for (; k<m.m_outerIndex[i+1]; ++k)
799  s << "(_,_) ";
800  }
801  s << std::endl;
802  s << std::endl;
803  s << "Outer pointers:\n";
804  for (Index i=0; i<m.outerSize(); ++i)
805  s << m.m_outerIndex[i] << " ";
806  s << " $" << std::endl;
807  if(!m.isCompressed())
808  {
809  s << "Inner non zeros:\n";
810  for (Index i=0; i<m.outerSize(); ++i)
811  s << m.m_innerNonZeros[i] << " ";
812  s << " $" << std::endl;
813  }
814  s << std::endl;
815  );
816  s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
817  return s;
818  }
819 
821  inline ~SparseMatrix()
822  {
823  std::free(m_outerIndex);
824  std::free(m_innerNonZeros);
825  }
826 
828  Scalar sum() const;
829 
830 # ifdef EIGEN_SPARSEMATRIX_PLUGIN
831 # include EIGEN_SPARSEMATRIX_PLUGIN
832 # endif
833 
834 protected:
835 
836  template<typename Other>
837  void initAssignment(const Other& other)
838  {
839  resize(other.rows(), other.cols());
840  if(m_innerNonZeros)
841  {
842  std::free(m_innerNonZeros);
843  m_innerNonZeros = 0;
844  }
845  }
846 
849  EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
850 
853  class SingletonVector
854  {
855  StorageIndex m_index;
856  StorageIndex m_value;
857  public:
858  typedef StorageIndex value_type;
859  SingletonVector(Index i, Index v)
860  : m_index(convert_index(i)), m_value(convert_index(v))
861  {}
862 
863  StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }
864  };
865 
868  EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);
869 
870 public:
873  EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
874  {
875  const Index outer = IsRowMajor ? row : col;
876  const Index inner = IsRowMajor ? col : row;
877 
878  eigen_assert(!isCompressed());
879  eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
880 
881  Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
882  m_data.index(p) = convert_index(inner);
883  return (m_data.value(p) = 0);
884  }
885 
886 private:
887  static void check_template_parameters()
888  {
889  EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
890  EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
891  }
892 
893  struct default_prunning_func {
894  default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
895  inline bool operator() (const Index&, const Index&, const Scalar& value) const
896  {
897  return !internal::isMuchSmallerThan(value, reference, epsilon);
898  }
899  Scalar reference;
900  RealScalar epsilon;
901  };
902 };
903 
904 namespace internal {
905 
906 template<typename InputIterator, typename SparseMatrixType, typename DupFunctor>
907 void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)
908 {
909  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
910  typedef typename SparseMatrixType::Scalar Scalar;
911  typedef typename SparseMatrixType::StorageIndex StorageIndex;
912  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());
913 
914  if(begin!=end)
915  {
916  // pass 1: count the nnz per inner-vector
917  typename SparseMatrixType::IndexVector wi(trMat.outerSize());
918  wi.setZero();
919  for(InputIterator it(begin); it!=end; ++it)
920  {
921  eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
922  wi(IsRowMajor ? it->col() : it->row())++;
923  }
924 
925  // pass 2: insert all the elements into trMat
926  trMat.reserve(wi);
927  for(InputIterator it(begin); it!=end; ++it)
928  trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
929 
930  // pass 3: collapse the duplicate entries
931  trMat.collapseDuplicates(dup_func);
932  }
933 
934  // pass 4: transposed copy -> implicit sorting
935  mat = trMat;
936 }
937 
938 }
939 
940 
978 template<typename Scalar, int _Options, typename _Index>
979 template<typename InputIterators>
980 void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
981 {
982  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_Index> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
983 }
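A typical use of setFromTriplets(), assuming <Eigen/SparseCore> and <vector> are included; the entries are illustrative. Duplicated (i,j) pairs are summed, as the scalar_sum_op functor above indicates:

    std::vector<Eigen::Triplet<double> > triplets;
    triplets.push_back(Eigen::Triplet<double>(0, 0, 1.0));
    triplets.push_back(Eigen::Triplet<double>(1, 2, 2.0));
    triplets.push_back(Eigen::Triplet<double>(1, 2, 3.0));  // duplicate of (1,2)
    Eigen::SparseMatrix<double> A(3, 3);
    A.setFromTriplets(triplets.begin(), triplets.end());    // A.coeff(1,2) == 5.0, A is compressed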
984 
994 template<typename Scalar, int _Options, typename _Index>
995 template<typename InputIterators,typename DupFunctor>
996 void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
997 {
998  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_Index>, DupFunctor>(begin, end, *this, dup_func);
999 }
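A sketch of the functor overload, reusing A and triplets from the previous sketch; the C++11 lambda replaces the default summation so that the last duplicate wins:

    A.setFromTriplets(triplets.begin(), triplets.end(),
                      [](const double&, const double& b) { return b; });  // last value wins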
1000 
1002 template<typename Scalar, int _Options, typename _Index>
1003 template<typename DupFunctor>
1004 void SparseMatrix<Scalar,_Options,_Index>::collapseDuplicates(DupFunctor dup_func)
1005 {
1006  eigen_assert(!isCompressed());
1007  // TODO, in practice we should be able to use m_innerNonZeros for that task
1008  IndexVector wi(innerSize());
1009  wi.fill(-1);
1010  StorageIndex count = 0;
1011  // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
1012  for(Index j=0; j<outerSize(); ++j)
1013  {
1014  StorageIndex start = count;
1015  Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
1016  for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
1017  {
1018  Index i = m_data.index(k);
1019  if(wi(i)>=start)
1020  {
1021  // we have already met this entry => accumulate it
1022  m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
1023  }
1024  else
1025  {
1026  m_data.value(count) = m_data.value(k);
1027  m_data.index(count) = m_data.index(k);
1028  wi(i) = count;
1029  ++count;
1030  }
1031  }
1032  m_outerIndex[j] = start;
1033  }
1034  m_outerIndex[m_outerSize] = count;
1035 
1036  // turn the matrix into compressed form
1037  std::free(m_innerNonZeros);
1038  m_innerNonZeros = 0;
1039  m_data.resize(m_outerIndex[m_outerSize]);
1040 }
1041 
1042 template<typename Scalar, int _Options, typename _Index>
1043 template<typename OtherDerived>
1044 EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Options,_Index>::operator=(const SparseMatrixBase<OtherDerived>& other)
1045 {
1046  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
1047  YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
1048 
1049  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1050  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1051  #endif
1052 
1053  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
1054  if (needToTranspose)
1055  {
1056  #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1057  EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1058  #endif
1059  // two passes algorithm:
1060  // 1 - compute the number of coeffs per dest inner vector
1061  // 2 - do the actual copy/eval
1062  // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
1063  typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
1064  typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
1065  typedef internal::evaluator<_OtherCopy> OtherCopyEval;
1066  OtherCopy otherCopy(other.derived());
1067  OtherCopyEval otherCopyEval(otherCopy);
1068 
1069  SparseMatrix dest(other.rows(),other.cols());
1070  Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();
1071 
1072  // pass 1
1073  // FIXME the above copy could be merged with that pass
1074  for (Index j=0; j<otherCopy.outerSize(); ++j)
1075  for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1076  ++dest.m_outerIndex[it.index()];
1077 
1078  // prefix sum
1079  StorageIndex count = 0;
1080  IndexVector positions(dest.outerSize());
1081  for (Index j=0; j<dest.outerSize(); ++j)
1082  {
1083  StorageIndex tmp = dest.m_outerIndex[j];
1084  dest.m_outerIndex[j] = count;
1085  positions[j] = count;
1086  count += tmp;
1087  }
1088  dest.m_outerIndex[dest.outerSize()] = count;
1089  // alloc
1090  dest.m_data.resize(count);
1091  // pass 2
1092  for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
1093  {
1094  for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1095  {
1096  Index pos = positions[it.index()]++;
1097  dest.m_data.index(pos) = j;
1098  dest.m_data.value(pos) = it.value();
1099  }
1100  }
1101  this->swap(dest);
1102  return *this;
1103  }
1104  else
1105  {
1106  if(other.isRValue())
1107  {
1108  initAssignment(other.derived());
1109  }
1110  // there is no special optimization
1111  return Base::operator=(other.derived());
1112  }
1113 }
1114 
1115 template<typename _Scalar, int _Options, typename _Index>
1116 typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insert(Index row, Index col)
1117 {
1118  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
1119 
1120  const Index outer = IsRowMajor ? row : col;
1121  const Index inner = IsRowMajor ? col : row;
1122 
1123  if(isCompressed())
1124  {
1125  if(nonZeros()==0)
1126  {
1127  // reserve space if not already done
1128  if(m_data.allocatedSize()==0)
1129  m_data.reserve(2*m_innerSize);
1130 
1131  // turn the matrix into non-compressed mode
1132  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
1133  if(!m_innerNonZeros) internal::throw_std_bad_alloc();
1134 
1135  memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
1136 
1137  // pack all inner-vectors to the end of the pre-allocated space
1138  // and allocate the entire free-space to the first inner-vector
1139  StorageIndex end = convert_index(m_data.allocatedSize());
1140  for(Index j=1; j<=m_outerSize; ++j)
1141  m_outerIndex[j] = end;
1142  }
1143  else
1144  {
1145  // turn the matrix into non-compressed mode
1146  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
1147  if(!m_innerNonZeros) internal::throw_std_bad_alloc();
1148  for(Index j=0; j<m_outerSize; ++j)
1149  m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
1150  }
1151  }
1152 
1153  // check whether we can do a fast "push back" insertion
1154  Index data_end = m_data.allocatedSize();
1155 
1156  // First case: we are filling a new inner vector which is packed at the end.
1157  // We assume that all remaining inner-vectors are also empty and packed to the end.
1158  if(m_outerIndex[outer]==data_end)
1159  {
1160  eigen_internal_assert(m_innerNonZeros[outer]==0);
1161 
1162  // pack previous empty inner-vectors to end of the used-space
1163  // and allocate the entire free-space to the current inner-vector.
1164  StorageIndex p = convert_index(m_data.size());
1165  Index j = outer;
1166  while(j>=0 && m_innerNonZeros[j]==0)
1167  m_outerIndex[j--] = p;
1168 
1169  // push back the new element
1170  ++m_innerNonZeros[outer];
1171  m_data.append(Scalar(0), inner);
1172 
1173  // check for reallocation
1174  if(data_end != m_data.allocatedSize())
1175  {
1176  // m_data has been reallocated
1177  // -> move remaining inner-vectors back to the end of the free-space
1178  // so that the entire free-space is allocated to the current inner-vector.
1179  eigen_internal_assert(data_end < m_data.allocatedSize());
1180  StorageIndex new_end = convert_index(m_data.allocatedSize());
1181  for(Index k=outer+1; k<=m_outerSize; ++k)
1182  if(m_outerIndex[k]==data_end)
1183  m_outerIndex[k] = new_end;
1184  }
1185  return m_data.value(p);
1186  }
1187 
1188  // Second case: the next inner-vector is packed to the end
1189  // and the current inner-vector end matches the used-space.
1190  if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
1191  {
1192  eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);
1193 
1194  // add space for the new element
1195  ++m_innerNonZeros[outer];
1196  m_data.resize(m_data.size()+1);
1197 
1198  // check for reallocation
1199  if(data_end != m_data.allocatedSize())
1200  {
1201  // m_data has been reallocated
1202  // -> move remaining inner-vectors back to the end of the free-space
1203  // so that the entire free-space is allocated to the current inner-vector.
1204  eigen_internal_assert(data_end < m_data.allocatedSize());
1205  StorageIndex new_end = convert_index(m_data.allocatedSize());
1206  for(Index k=outer+1; k<=m_outerSize; ++k)
1207  if(m_outerIndex[k]==data_end)
1208  m_outerIndex[k] = new_end;
1209  }
1210 
1211  // and insert it at the right position (sorted insertion)
1212  Index startId = m_outerIndex[outer];
1213  Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
1214  while ( (p > startId) && (m_data.index(p-1) > inner) )
1215  {
1216  m_data.index(p) = m_data.index(p-1);
1217  m_data.value(p) = m_data.value(p-1);
1218  --p;
1219  }
1220 
1221  m_data.index(p) = convert_index(inner);
1222  return (m_data.value(p) = 0);
1223  }
1224 
1225  if(m_data.size() != m_data.allocatedSize())
1226  {
1227  // make sure the matrix is compatible to random un-compressed insertion:
1228  m_data.resize(m_data.allocatedSize());
1229  this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));
1230  }
1231 
1232  return insertUncompressed(row,col);
1233 }
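The random-insertion path above is cheapest when enough room has been reserved per inner vector beforehand; a common pattern (the estimate of 2 non-zeros per column is illustrative):

    const int n = 4;
    Eigen::SparseMatrix<double> A(n, n);
    A.reserve(Eigen::VectorXi::Constant(n, 2));  // expect ~2 non-zeros per column
    A.insert(0, 1) = 3.0;                        // each coefficient may be inserted only once
    A.insert(2, 0) = 1.5;
    A.insert(3, 3) = 4.0;
    A.makeCompressed();                          // optional, restores the compact layout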
1234 
1235 template<typename _Scalar, int _Options, typename _Index>
1236 EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertUncompressed(Index row, Index col)
1237 {
1238  eigen_assert(!isCompressed());
1239 
1240  const Index outer = IsRowMajor ? row : col;
1241  const StorageIndex inner = convert_index(IsRowMajor ? col : row);
1242 
1243  Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
1244  StorageIndex innerNNZ = m_innerNonZeros[outer];
1245  if(innerNNZ>=room)
1246  {
1247  // this inner vector is full, we need to reallocate the whole buffer :(
1248  reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
1249  }
1250 
1251  Index startId = m_outerIndex[outer];
1252  Index p = startId + m_innerNonZeros[outer];
1253  while ( (p > startId) && (m_data.index(p-1) > inner) )
1254  {
1255  m_data.index(p) = m_data.index(p-1);
1256  m_data.value(p) = m_data.value(p-1);
1257  --p;
1258  }
1259  eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");
1260 
1261  m_innerNonZeros[outer]++;
1262 
1263  m_data.index(p) = inner;
1264  return (m_data.value(p) = 0);
1265 }
1266 
1267 template<typename _Scalar, int _Options, typename _Index>
1268 EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertCompressed(Index row, Index col)
1269 {
1270  eigen_assert(isCompressed());
1271 
1272  const Index outer = IsRowMajor ? row : col;
1273  const Index inner = IsRowMajor ? col : row;
1274 
1275  Index previousOuter = outer;
1276  if (m_outerIndex[outer+1]==0)
1277  {
1278  // we start a new inner vector
1279  while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
1280  {
1281  m_outerIndex[previousOuter] = convert_index(m_data.size());
1282  --previousOuter;
1283  }
1284  m_outerIndex[outer+1] = m_outerIndex[outer];
1285  }
1286 
1287  // here we have to handle the tricky case where the outerIndex array
1288  // starts with: [ 0 0 0 0 0 1 ...] and we are inserting in, e.g.,
1289  // the 2nd inner vector...
1290  bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
1291  && (size_t(m_outerIndex[outer+1]) == m_data.size());
1292 
1293  size_t startId = m_outerIndex[outer];
1294  // FIXME let's make sure sizeof(long int) == sizeof(size_t)
1295  size_t p = m_outerIndex[outer+1];
1296  ++m_outerIndex[outer+1];
1297 
1298  double reallocRatio = 1;
1299  if (m_data.allocatedSize()<=m_data.size())
1300  {
1301  // if there is no preallocated memory, let's reserve a minimum of 32 elements
1302  if (m_data.size()==0)
1303  {
1304  m_data.reserve(32);
1305  }
1306  else
1307  {
1308  // we need to reallocate the data, to reduce multiple reallocations
1309  // we use a smart resize algorithm based on the current filling ratio
1310  // in addition, we use double to avoid integers overflows
1311  double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
1312  reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
1313  // furthermore we bound the realloc ratio to:
1314  // 1) reduce multiple minor realloc when the matrix is almost filled
1315  // 2) avoid allocating too much memory when the matrix is almost empty
1316  reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
1317  }
1318  }
1319  m_data.resize(m_data.size()+1,reallocRatio);
1320 
1321  if (!isLastVec)
1322  {
1323  if (previousOuter==-1)
1324  {
1325  // oops wrong guess.
1326  // let's correct the outer offsets
1327  for (Index k=0; k<=(outer+1); ++k)
1328  m_outerIndex[k] = 0;
1329  Index k=outer+1;
1330  while(m_outerIndex[k]==0)
1331  m_outerIndex[k++] = 1;
1332  while (k<=m_outerSize && m_outerIndex[k]!=0)
1333  m_outerIndex[k++]++;
1334  p = 0;
1335  --k;
1336  k = m_outerIndex[k]-1;
1337  while (k>0)
1338  {
1339  m_data.index(k) = m_data.index(k-1);
1340  m_data.value(k) = m_data.value(k-1);
1341  k--;
1342  }
1343  }
1344  else
1345  {
1346  // we are not inserting into the last inner vec
1347  // update outer indices:
1348  Index j = outer+2;
1349  while (j<=m_outerSize && m_outerIndex[j]!=0)
1350  m_outerIndex[j++]++;
1351  --j;
1352  // shift data of last vecs:
1353  Index k = m_outerIndex[j]-1;
1354  while (k>=Index(p))
1355  {
1356  m_data.index(k) = m_data.index(k-1);
1357  m_data.value(k) = m_data.value(k-1);
1358  k--;
1359  }
1360  }
1361  }
1362 
1363  while ( (p > startId) && (m_data.index(p-1) > inner) )
1364  {
1365  m_data.index(p) = m_data.index(p-1);
1366  m_data.value(p) = m_data.value(p-1);
1367  --p;
1368  }
1369 
1370  m_data.index(p) = inner;
1371  return (m_data.value(p) = 0);
1372 }
1373 
1374 namespace internal {
1375 
1376 template<typename _Scalar, int _Options, typename _Index>
1377 struct evaluator<SparseMatrix<_Scalar,_Options,_Index> >
1378  : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_Index> > >
1379 {
1380  typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_Index> > > Base;
1381  typedef SparseMatrix<_Scalar,_Options,_Index> SparseMatrixType;
1382  evaluator() : Base() {}
1383  explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
1384 };
1385 
1386 }
1387 
1388 } // end namespace Eigen
1389 
1390 #endif // EIGEN_SPARSEMATRIX_H