Eigen 3.2.4
SparseMatrix.h
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
5 //
6 // This Source Code Form is subject to the terms of the Mozilla
7 // Public License v. 2.0. If a copy of the MPL was not distributed
8 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9 
10 #ifndef EIGEN_SPARSEMATRIX_H
11 #define EIGEN_SPARSEMATRIX_H
12 
13 namespace Eigen {
14 
41 namespace internal {
42 template<typename _Scalar, int _Options, typename _Index>
43 struct traits<SparseMatrix<_Scalar, _Options, _Index> >
44 {
45  typedef _Scalar Scalar;
46  typedef _Index Index;
47  typedef Sparse StorageKind;
48  typedef MatrixXpr XprKind;
49  enum {
50  RowsAtCompileTime = Dynamic,
51  ColsAtCompileTime = Dynamic,
52  MaxRowsAtCompileTime = Dynamic,
53  MaxColsAtCompileTime = Dynamic,
54  Flags = _Options | NestByRefBit | LvalueBit,
55  CoeffReadCost = NumTraits<Scalar>::ReadCost,
56  SupportedAccessPatterns = InnerRandomAccessPattern
57  };
58 };
59 
60 template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
61 struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
62 {
63  typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType;
64  typedef typename nested<MatrixType>::type MatrixTypeNested;
65  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
66 
67  typedef _Scalar Scalar;
68  typedef Dense StorageKind;
69  typedef _Index Index;
70  typedef MatrixXpr XprKind;
71 
72  enum {
73  RowsAtCompileTime = Dynamic,
74  ColsAtCompileTime = 1,
75  MaxRowsAtCompileTime = Dynamic,
76  MaxColsAtCompileTime = 1,
77  Flags = 0,
78  CoeffReadCost = _MatrixTypeNested::CoeffReadCost*10
79  };
80 };
81 
82 } // end namespace internal
83 
84 template<typename _Scalar, int _Options, typename _Index>
85 class SparseMatrix
86  : public SparseMatrixBase<SparseMatrix<_Scalar, _Options, _Index> >
87 {
88  public:
89  EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
90  EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=)
91  EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=)
92 
93  typedef MappedSparseMatrix<Scalar,Flags> Map;
94  using Base::IsRowMajor;
95  typedef internal::CompressedStorage<Scalar,Index> Storage;
96  enum {
97  Options = _Options
98  };
99 
100  protected:
101 
102  typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0),Index> TransposedSparseMatrix;
103 
104  Index m_outerSize;
105  Index m_innerSize;
106  Index* m_outerIndex;
107  Index* m_innerNonZeros; // optional, if null then the data is compressed
108  Storage m_data;
109 
110  Eigen::Map<Matrix<Index,Dynamic,1> > innerNonZeros() { return Eigen::Map<Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
111  const Eigen::Map<const Matrix<Index,Dynamic,1> > innerNonZeros() const { return Eigen::Map<const Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
112 
113  public:
114 
116  inline bool isCompressed() const { return m_innerNonZeros==0; }
117 
119  inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
121  inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
122 
124  inline Index innerSize() const { return m_innerSize; }
126  inline Index outerSize() const { return m_outerSize; }
127 
131  inline const Scalar* valuePtr() const { return &m_data.value(0); }
135  inline Scalar* valuePtr() { return &m_data.value(0); }
136 
140  inline const Index* innerIndexPtr() const { return &m_data.index(0); }
144  inline Index* innerIndexPtr() { return &m_data.index(0); }
145 
149  inline const Index* outerIndexPtr() const { return m_outerIndex; }
153  inline Index* outerIndexPtr() { return m_outerIndex; }
154 
158  inline const Index* innerNonZeroPtr() const { return m_innerNonZeros; }
162  inline Index* innerNonZeroPtr() { return m_innerNonZeros; }
163 
165  inline Storage& data() { return m_data; }
167  inline const Storage& data() const { return m_data; }
168 
171  inline Scalar coeff(Index row, Index col) const
172  {
173  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
174 
175  const Index outer = IsRowMajor ? row : col;
176  const Index inner = IsRowMajor ? col : row;
177  Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
178  return m_data.atInRange(m_outerIndex[outer], end, inner);
179  }
180 
189  inline Scalar& coeffRef(Index row, Index col)
190  {
191  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
192 
193  const Index outer = IsRowMajor ? row : col;
194  const Index inner = IsRowMajor ? col : row;
195 
196  Index start = m_outerIndex[outer];
197  Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
198  eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
199  if(end<=start)
200  return insert(row,col);
201  const Index p = m_data.searchLowerIndex(start,end-1,inner);
202  if((p<end) && (m_data.index(p)==inner))
203  return m_data.value(p);
204  else
205  return insert(row,col);
206  }
207 
220  Scalar& insert(Index row, Index col)
221  {
222  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
223 
224  if(isCompressed())
225  {
226  reserve(Matrix<Index,Dynamic,1>::Constant(outerSize(), 2));
227  }
228  return insertUncompressed(row,col);
229  }
230 
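Editor's note: a minimal usage sketch, not part of SparseMatrix.h, illustrating how the coeff(), coeffRef() and insert() members defined above differ (default column-major storage, double scalars; the sizes and values are arbitrary):

#include <Eigen/Sparse>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> A(4,4);  // column-major by default
  A.insert(1,2) = 3.0;                 // insert() assumes the entry does not exist yet
  A.coeffRef(1,2) += 1.0;              // coeffRef() returns a reference, inserting a zero if needed
  std::cout << A.coeff(0,0) << "\n";   // coeff() is read-only and returns 0 for empty entries
  std::cout << A.coeff(1,2) << "\n";   // prints 4
  return 0;
}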
231  public:
232 
233  class InnerIterator;
234  class ReverseInnerIterator;
235 
237  inline void setZero()
238  {
239  m_data.clear();
240  memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
241  if(m_innerNonZeros)
242  memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(Index));
243  }
244 
246  inline Index nonZeros() const
247  {
248  if(m_innerNonZeros)
249  return innerNonZeros().sum();
250  return static_cast<Index>(m_data.size());
251  }
252 
256  inline void reserve(Index reserveSize)
257  {
258  eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
259  m_data.reserve(reserveSize);
260  }
261 
262  #ifdef EIGEN_PARSED_BY_DOXYGEN
263 
266  template<class SizesType>
267  inline void reserve(const SizesType& reserveSizes);
268  #else
269  template<class SizesType>
270  inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif = typename SizesType::value_type())
271  {
272  EIGEN_UNUSED_VARIABLE(enableif);
273  reserveInnerVectors(reserveSizes);
274  }
275  template<class SizesType>
276  inline void reserve(const SizesType& reserveSizes, const typename SizesType::Scalar& enableif =
277  #if (!defined(_MSC_VER)) || (_MSC_VER>=1500) // MSVC 2005 fails to compile with this typename
278  typename
279  #endif
280  SizesType::Scalar())
281  {
282  EIGEN_UNUSED_VARIABLE(enableif);
283  reserveInnerVectors(reserveSizes);
284  }
285  #endif // EIGEN_PARSED_BY_DOXYGEN
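Editor's note: a sketch of the per-inner-vector reserve() overload above, assuming the default column-major storage; the function name fillBanded and the reserve size 3 are illustrative only:

#include <Eigen/Sparse>

void fillBanded(Eigen::SparseMatrix<double>& A)
{
  // reserve room for up to 3 nonzeros per column (the inner vectors here)
  A.reserve(Eigen::VectorXi::Constant(A.cols(), 3));
  for(int j=0; j<A.cols(); ++j)
    for(int i=j; i<A.rows() && i<j+3; ++i)
      A.insert(i,j) = 1.0;             // insertion in uncompressed mode, within the reserved room
  A.makeCompressed();                  // squeeze out the unused room, back to compressed storage
}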
286  protected:
287  template<class SizesType>
288  inline void reserveInnerVectors(const SizesType& reserveSizes)
289  {
290  if(isCompressed())
291  {
292  std::size_t totalReserveSize = 0;
293  // turn the matrix into non-compressed mode
294  m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
295  if (!m_innerNonZeros) internal::throw_std_bad_alloc();
296 
297  // temporarily use m_innerNonZeros to hold the new starting points.
298  Index* newOuterIndex = m_innerNonZeros;
299 
300  Index count = 0;
301  for(Index j=0; j<m_outerSize; ++j)
302  {
303  newOuterIndex[j] = count;
304  count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
305  totalReserveSize += reserveSizes[j];
306  }
307  m_data.reserve(totalReserveSize);
308  Index previousOuterIndex = m_outerIndex[m_outerSize];
309  for(Index j=m_outerSize-1; j>=0; --j)
310  {
311  Index innerNNZ = previousOuterIndex - m_outerIndex[j];
312  for(Index i=innerNNZ-1; i>=0; --i)
313  {
314  m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
315  m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
316  }
317  previousOuterIndex = m_outerIndex[j];
318  m_outerIndex[j] = newOuterIndex[j];
319  m_innerNonZeros[j] = innerNNZ;
320  }
321  m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
322 
323  m_data.resize(m_outerIndex[m_outerSize]);
324  }
325  else
326  {
327  Index* newOuterIndex = static_cast<Index*>(std::malloc((m_outerSize+1)*sizeof(Index)));
328  if (!newOuterIndex) internal::throw_std_bad_alloc();
329 
330  Index count = 0;
331  for(Index j=0; j<m_outerSize; ++j)
332  {
333  newOuterIndex[j] = count;
334  Index alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
335  Index toReserve = std::max<Index>(reserveSizes[j], alreadyReserved);
336  count += toReserve + m_innerNonZeros[j];
337  }
338  newOuterIndex[m_outerSize] = count;
339 
340  m_data.resize(count);
341  for(Index j=m_outerSize-1; j>=0; --j)
342  {
343  Index offset = newOuterIndex[j] - m_outerIndex[j];
344  if(offset>0)
345  {
346  Index innerNNZ = m_innerNonZeros[j];
347  for(Index i=innerNNZ-1; i>=0; --i)
348  {
349  m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
350  m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
351  }
352  }
353  }
354 
355  std::swap(m_outerIndex, newOuterIndex);
356  std::free(newOuterIndex);
357  }
358 
359  }
360  public:
361 
362  //--- low level purely coherent filling ---
363 
374  inline Scalar& insertBack(Index row, Index col)
375  {
376  return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
377  }
378 
381  inline Scalar& insertBackByOuterInner(Index outer, Index inner)
382  {
383  eigen_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
384  eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
385  Index p = m_outerIndex[outer+1];
386  ++m_outerIndex[outer+1];
387  m_data.append(0, inner);
388  return m_data.value(p);
389  }
390 
393  inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
394  {
395  Index p = m_outerIndex[outer+1];
396  ++m_outerIndex[outer+1];
397  m_data.append(0, inner);
398  return m_data.value(p);
399  }
400 
403  inline void startVec(Index outer)
404  {
405  eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
406  eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
407  m_outerIndex[outer+1] = m_outerIndex[outer];
408  }
409 
413  inline void finalize()
414  {
415  if(isCompressed())
416  {
417  Index size = static_cast<Index>(m_data.size());
418  Index i = m_outerSize;
419  // find the last filled column
420  while (i>=0 && m_outerIndex[i]==0)
421  --i;
422  ++i;
423  while (i<=m_outerSize)
424  {
425  m_outerIndex[i] = size;
426  ++i;
427  }
428  }
429  }
430 
431  //---
432 
433  template<typename InputIterators>
434  void setFromTriplets(const InputIterators& begin, const InputIterators& end);
435 
436  void sumupDuplicates();
437 
438  //---
439 
442  Scalar& insertByOuterInner(Index j, Index i)
443  {
444  return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
445  }
446 
449  void makeCompressed()
450  {
451  if(isCompressed())
452  return;
453 
454  Index oldStart = m_outerIndex[1];
455  m_outerIndex[1] = m_innerNonZeros[0];
456  for(Index j=1; j<m_outerSize; ++j)
457  {
458  Index nextOldStart = m_outerIndex[j+1];
459  Index offset = oldStart - m_outerIndex[j];
460  if(offset>0)
461  {
462  for(Index k=0; k<m_innerNonZeros[j]; ++k)
463  {
464  m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
465  m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
466  }
467  }
468  m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
469  oldStart = nextOldStart;
470  }
471  std::free(m_innerNonZeros);
472  m_innerNonZeros = 0;
473  m_data.resize(m_outerIndex[m_outerSize]);
474  m_data.squeeze();
475  }
476 
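Editor's note: once makeCompressed() has been called, the matrix is a plain compressed (CCS/CRS) structure whose three arrays are exposed by valuePtr(), innerIndexPtr() and outerIndexPtr(). A sketch reading them directly, assuming the default int index type and a compressed, column-major matrix:

#include <Eigen/Sparse>
#include <iostream>

void printCompressed(const Eigen::SparseMatrix<double>& A)  // assumes A.isCompressed()
{
  const double* values = A.valuePtr();       // nonZeros() coefficient values
  const int* inner     = A.innerIndexPtr();  // row index of each stored value
  const int* outer     = A.outerIndexPtr();  // outerSize()+1 offsets into values/inner
  for(int j=0; j<A.outerSize(); ++j)
    for(int p=outer[j]; p<outer[j+1]; ++p)
      std::cout << "(" << inner[p] << "," << j << ") = " << values[p] << "\n";
}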
478  void uncompress()
479  {
480  if(m_innerNonZeros != 0)
481  return;
482  m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
483  for (Index i = 0; i < m_outerSize; i++)
484  {
485  m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
486  }
487  }
488 
490  void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
491  {
492  prune(default_prunning_func(reference,epsilon));
493  }
494 
502  template<typename KeepFunc>
503  void prune(const KeepFunc& keep = KeepFunc())
504  {
505  // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
506  // TODO also implement a unit test
507  makeCompressed();
508 
509  Index k = 0;
510  for(Index j=0; j<m_outerSize; ++j)
511  {
512  Index previousStart = m_outerIndex[j];
513  m_outerIndex[j] = k;
514  Index end = m_outerIndex[j+1];
515  for(Index i=previousStart; i<end; ++i)
516  {
517  if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
518  {
519  m_data.value(k) = m_data.value(i);
520  m_data.index(k) = m_data.index(i);
521  ++k;
522  }
523  }
524  }
525  m_outerIndex[m_outerSize] = k;
526  m_data.resize(k,0);
527  }
528 
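Editor's note: the functor-based prune() above accepts any callable taking (row, col, value); a sketch with a hypothetical absolute-threshold functor:

#include <Eigen/Sparse>
#include <cmath>

struct KeepAboveThreshold
{
  double thresh;
  explicit KeepAboveThreshold(double t) : thresh(t) {}
  bool operator()(const int& /*row*/, const int& /*col*/, const double& value) const
  { return std::abs(value) > thresh; }
};

void dropSmall(Eigen::SparseMatrix<double>& A)
{
  A.prune(KeepAboveThreshold(1e-12));  // compresses A, then keeps only the entries the functor accepts
}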
532  void conservativeResize(Index rows, Index cols)
533  {
534  // No change
535  if (this->rows() == rows && this->cols() == cols) return;
536 
537  // If one dimension is null, then there is nothing to be preserved
538  if(rows==0 || cols==0) return resize(rows,cols);
539 
540  Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
541  Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
542  Index newInnerSize = IsRowMajor ? cols : rows;
543 
544  // Deals with inner non zeros
545  if (m_innerNonZeros)
546  {
547  // Resize m_innerNonZeros
548  Index *newInnerNonZeros = static_cast<Index*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(Index)));
549  if (!newInnerNonZeros) internal::throw_std_bad_alloc();
550  m_innerNonZeros = newInnerNonZeros;
551 
552  for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
553  m_innerNonZeros[i] = 0;
554  }
555  else if (innerChange < 0)
556  {
557  // Inner size decreased: allocate a new m_innerNonZeros
558  m_innerNonZeros = static_cast<Index*>(std::malloc((m_outerSize+outerChange+1) * sizeof(Index)));
559  if (!m_innerNonZeros) internal::throw_std_bad_alloc();
560  for(Index i = 0; i < m_outerSize; i++)
561  m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
562  }
563 
564  // Change the m_innerNonZeros in case of a decrease of inner size
565  if (m_innerNonZeros && innerChange < 0)
566  {
567  for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
568  {
569  Index &n = m_innerNonZeros[i];
570  Index start = m_outerIndex[i];
571  while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
572  }
573  }
574 
575  m_innerSize = newInnerSize;
576 
577  // Re-allocate outer index structure if necessary
578  if (outerChange == 0)
579  return;
580 
581  Index *newOuterIndex = static_cast<Index*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(Index)));
582  if (!newOuterIndex) internal::throw_std_bad_alloc();
583  m_outerIndex = newOuterIndex;
584  if (outerChange > 0)
585  {
586  Index last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
587  for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
588  m_outerIndex[i] = last;
589  }
590  m_outerSize += outerChange;
591  }
592 
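Editor's note: unlike resize() below, which empties the matrix, conservativeResize() keeps the nonzeros that still fit in the new dimensions; a short sketch:

#include <Eigen/Sparse>

void growByOne(Eigen::SparseMatrix<double>& A)
{
  A.conservativeResize(A.rows()+1, A.cols()+1);  // existing entries are preserved
  // A.resize(A.rows()+1, A.cols()+1);           // would instead reset A to an all-zero matrix
}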
596  void resize(Index rows, Index cols)
597  {
598  const Index outerSize = IsRowMajor ? rows : cols;
599  m_innerSize = IsRowMajor ? cols : rows;
600  m_data.clear();
601  if (m_outerSize != outerSize || m_outerSize==0)
602  {
603  std::free(m_outerIndex);
604  m_outerIndex = static_cast<Index*>(std::malloc((outerSize + 1) * sizeof(Index)));
605  if (!m_outerIndex) internal::throw_std_bad_alloc();
606 
607  m_outerSize = outerSize;
608  }
609  if(m_innerNonZeros)
610  {
611  std::free(m_innerNonZeros);
612  m_innerNonZeros = 0;
613  }
614  memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
615  }
616 
619  void resizeNonZeros(Index size)
620  {
621  // TODO remove this function
622  m_data.resize(size);
623  }
624 
626  const Diagonal<const SparseMatrix> diagonal() const { return *this; }
627 
629  inline SparseMatrix()
630  : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
631  {
632  check_template_parameters();
633  resize(0, 0);
634  }
635 
637  inline SparseMatrix(Index rows, Index cols)
638  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
639  {
640  check_template_parameters();
641  resize(rows, cols);
642  }
643 
645  template<typename OtherDerived>
646  SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
647  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
648  {
649  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
650  YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
651  check_template_parameters();
652  *this = other.derived();
653  }
654 
656  template<typename OtherDerived, unsigned int UpLo>
657  SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
658  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
659  {
660  check_template_parameters();
661  *this = other;
662  }
663 
665  inline SparseMatrix(const SparseMatrix& other)
666  : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
667  {
668  check_template_parameters();
669  *this = other.derived();
670  }
671 
673  template<typename OtherDerived>
674  SparseMatrix(const ReturnByValue<OtherDerived>& other)
675  : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
676  {
677  check_template_parameters();
678  initAssignment(other);
679  other.evalTo(*this);
680  }
681 
684  inline void swap(SparseMatrix& other)
685  {
686  //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
687  std::swap(m_outerIndex, other.m_outerIndex);
688  std::swap(m_innerSize, other.m_innerSize);
689  std::swap(m_outerSize, other.m_outerSize);
690  std::swap(m_innerNonZeros, other.m_innerNonZeros);
691  m_data.swap(other.m_data);
692  }
693 
695  inline void setIdentity()
696  {
697  eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
698  this->m_data.resize(rows());
699  Eigen::Map<Matrix<Index, Dynamic, 1> >(&this->m_data.index(0), rows()).setLinSpaced(0, rows()-1);
700  Eigen::Map<Matrix<Scalar, Dynamic, 1> >(&this->m_data.value(0), rows()).setOnes();
701  Eigen::Map<Matrix<Index, Dynamic, 1> >(this->m_outerIndex, rows()+1).setLinSpaced(0, rows());
702  }
703  inline SparseMatrix& operator=(const SparseMatrix& other)
704  {
705  if (other.isRValue())
706  {
707  swap(other.const_cast_derived());
708  }
709  else if(this!=&other)
710  {
711  initAssignment(other);
712  if(other.isCompressed())
713  {
714  memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(Index));
715  m_data = other.m_data;
716  }
717  else
718  {
719  Base::operator=(other);
720  }
721  }
722  return *this;
723  }
724 
725  #ifndef EIGEN_PARSED_BY_DOXYGEN
726  template<typename Lhs, typename Rhs>
727  inline SparseMatrix& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
728  { return Base::operator=(product); }
729 
730  template<typename OtherDerived>
731  inline SparseMatrix& operator=(const ReturnByValue<OtherDerived>& other)
732  {
733  initAssignment(other);
734  return Base::operator=(other.derived());
735  }
736 
737  template<typename OtherDerived>
738  inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
739  { return Base::operator=(other.derived()); }
740  #endif
741 
742  template<typename OtherDerived>
743  EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
744 
745  friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
746  {
747  EIGEN_DBG_SPARSE(
748  s << "Nonzero entries:\n";
749  if(m.isCompressed())
750  for (Index i=0; i<m.nonZeros(); ++i)
751  s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
752  else
753  for (Index i=0; i<m.outerSize(); ++i)
754  {
755  Index p = m.m_outerIndex[i];
756  Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
757  Index k=p;
758  for (; k<pe; ++k)
759  s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
760  for (; k<m.m_outerIndex[i+1]; ++k)
761  s << "(_,_) ";
762  }
763  s << std::endl;
764  s << std::endl;
765  s << "Outer pointers:\n";
766  for (Index i=0; i<m.outerSize(); ++i)
767  s << m.m_outerIndex[i] << " ";
768  s << " $" << std::endl;
769  if(!m.isCompressed())
770  {
771  s << "Inner non zeros:\n";
772  for (Index i=0; i<m.outerSize(); ++i)
773  s << m.m_innerNonZeros[i] << " ";
774  s << " $" << std::endl;
775  }
776  s << std::endl;
777  );
778  s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
779  return s;
780  }
781 
783  inline ~SparseMatrix()
784  {
785  std::free(m_outerIndex);
786  std::free(m_innerNonZeros);
787  }
788 
789 #ifndef EIGEN_PARSED_BY_DOXYGEN
790 
791  Scalar sum() const;
792 #endif
793 
794 # ifdef EIGEN_SPARSEMATRIX_PLUGIN
795 # include EIGEN_SPARSEMATRIX_PLUGIN
796 # endif
797 
798 protected:
799 
800  template<typename Other>
801  void initAssignment(const Other& other)
802  {
803  resize(other.rows(), other.cols());
804  if(m_innerNonZeros)
805  {
806  std::free(m_innerNonZeros);
807  m_innerNonZeros = 0;
808  }
809  }
810 
813  EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
814 
817  class SingletonVector
818  {
819  Index m_index;
820  Index m_value;
821  public:
822  typedef Index value_type;
823  SingletonVector(Index i, Index v)
824  : m_index(i), m_value(v)
825  {}
826 
827  Index operator[](Index i) const { return i==m_index ? m_value : 0; }
828  };
829 
832  EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);
833 
834 public:
837  EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
838  {
839  const Index outer = IsRowMajor ? row : col;
840  const Index inner = IsRowMajor ? col : row;
841 
842  eigen_assert(!isCompressed());
843  eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
844 
845  Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
846  m_data.index(p) = inner;
847  return (m_data.value(p) = 0);
848  }
849 
850 private:
851  static void check_template_parameters()
852  {
853  EIGEN_STATIC_ASSERT(NumTraits<Index>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
854  EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
855  }
856 
857  struct default_prunning_func {
858  default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
859  inline bool operator() (const Index&, const Index&, const Scalar& value) const
860  {
861  return !internal::isMuchSmallerThan(value, reference, epsilon);
862  }
863  Scalar reference;
864  RealScalar epsilon;
865  };
866 };
867 
868 template<typename Scalar, int _Options, typename _Index>
869 class SparseMatrix<Scalar,_Options,_Index>::InnerIterator
870 {
871  public:
872  InnerIterator(const SparseMatrix& mat, Index outer)
873  : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_id(mat.m_outerIndex[outer])
874  {
875  if(mat.isCompressed())
876  m_end = mat.m_outerIndex[outer+1];
877  else
878  m_end = m_id + mat.m_innerNonZeros[outer];
879  }
880 
881  inline InnerIterator& operator++() { m_id++; return *this; }
882 
883  inline const Scalar& value() const { return m_values[m_id]; }
884  inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }
885 
886  inline Index index() const { return m_indices[m_id]; }
887  inline Index outer() const { return m_outer; }
888  inline Index row() const { return IsRowMajor ? m_outer : index(); }
889  inline Index col() const { return IsRowMajor ? index() : m_outer; }
890 
891  inline operator bool() const { return (m_id < m_end); }
892 
893  protected:
894  const Scalar* m_values;
895  const Index* m_indices;
896  const Index m_outer;
897  Index m_id;
898  Index m_end;
899 };
900 
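Editor's note: a sketch of the canonical traversal using the InnerIterator defined above; it visits the stored entries of one inner vector (a column for the default column-major storage) at a time:

#include <Eigen/Sparse>
#include <iostream>

void visitNonZeros(const Eigen::SparseMatrix<double>& A)
{
  for(int j=0; j<A.outerSize(); ++j)
    for(Eigen::SparseMatrix<double>::InnerIterator it(A, j); it; ++it)
      std::cout << "A(" << it.row() << "," << it.col() << ") = " << it.value() << "\n";
}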
901 template<typename Scalar, int _Options, typename _Index>
902 class SparseMatrix<Scalar,_Options,_Index>::ReverseInnerIterator
903 {
904  public:
905  ReverseInnerIterator(const SparseMatrix& mat, Index outer)
906  : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_start(mat.m_outerIndex[outer])
907  {
908  if(mat.isCompressed())
909  m_id = mat.m_outerIndex[outer+1];
910  else
911  m_id = m_start + mat.m_innerNonZeros[outer];
912  }
913 
914  inline ReverseInnerIterator& operator--() { --m_id; return *this; }
915 
916  inline const Scalar& value() const { return m_values[m_id-1]; }
917  inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id-1]); }
918 
919  inline Index index() const { return m_indices[m_id-1]; }
920  inline Index outer() const { return m_outer; }
921  inline Index row() const { return IsRowMajor ? m_outer : index(); }
922  inline Index col() const { return IsRowMajor ? index() : m_outer; }
923 
924  inline operator bool() const { return (m_id > m_start); }
925 
926  protected:
927  const Scalar* m_values;
928  const Index* m_indices;
929  const Index m_outer;
930  Index m_id;
931  const Index m_start;
932 };
933 
934 namespace internal {
935 
936 template<typename InputIterator, typename SparseMatrixType>
937 void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, int Options = 0)
938 {
939  EIGEN_UNUSED_VARIABLE(Options);
940  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
941  typedef typename SparseMatrixType::Scalar Scalar;
942  typedef typename SparseMatrixType::Index Index;
943  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,Index> trMat(mat.rows(),mat.cols());
944 
945  if(begin!=end)
946  {
947  // pass 1: count the nnz per inner-vector
948  Matrix<Index,Dynamic,1> wi(trMat.outerSize());
949  wi.setZero();
950  for(InputIterator it(begin); it!=end; ++it)
951  {
952  eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
953  wi(IsRowMajor ? it->col() : it->row())++;
954  }
955 
956  // pass 2: insert all the elements into trMat
957  trMat.reserve(wi);
958  for(InputIterator it(begin); it!=end; ++it)
959  trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
960 
961  // pass 3:
962  trMat.sumupDuplicates();
963  }
964 
965  // pass 4: transposed copy -> implicit sorting
966  mat = trMat;
967 }
968 
969 }
970 
971 
1009 template<typename Scalar, int _Options, typename _Index>
1010 template<typename InputIterators>
1011 void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
1012 {
1013  internal::set_from_triplets(begin, end, *this);
1014 }
1015 
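Editor's note: the usual way to reach setFromTriplets() is to collect Eigen::Triplet entries and assemble them in one shot; a sketch building a tridiagonal matrix (the function name and values are illustrative):

#include <Eigen/Sparse>
#include <vector>

Eigen::SparseMatrix<double> tridiagonal(int n)
{
  typedef Eigen::Triplet<double> T;
  std::vector<T> triplets;
  triplets.reserve(3*n);
  for(int i=0; i<n; ++i)
  {
    triplets.push_back(T(i, i, 2.0));                   // duplicate (i,j) entries are summed up
    if(i > 0)   triplets.push_back(T(i, i-1, -1.0));
    if(i+1 < n) triplets.push_back(T(i, i+1, -1.0));
  }
  Eigen::SparseMatrix<double> A(n, n);
  A.setFromTriplets(triplets.begin(), triplets.end());  // sorted, duplicate-free, compressed result
  return A;
}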
1017 template<typename Scalar, int _Options, typename _Index>
1018 void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
1019 {
1020  eigen_assert(!isCompressed());
1021  // TODO, in practice we should be able to use m_innerNonZeros for that task
1022  Matrix<Index,Dynamic,1> wi(innerSize());
1023  wi.fill(-1);
1024  Index count = 0;
1025  // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
1026  for(Index j=0; j<outerSize(); ++j)
1027  {
1028  Index start = count;
1029  Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
1030  for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
1031  {
1032  Index i = m_data.index(k);
1033  if(wi(i)>=start)
1034  {
1035  // we have already met this entry => accumulate it
1036  m_data.value(wi(i)) += m_data.value(k);
1037  }
1038  else
1039  {
1040  m_data.value(count) = m_data.value(k);
1041  m_data.index(count) = m_data.index(k);
1042  wi(i) = count;
1043  ++count;
1044  }
1045  }
1046  m_outerIndex[j] = start;
1047  }
1048  m_outerIndex[m_outerSize] = count;
1049 
1050  // turn the matrix into compressed form
1051  std::free(m_innerNonZeros);
1052  m_innerNonZeros = 0;
1053  m_data.resize(m_outerIndex[m_outerSize]);
1054 }
1055 
1056 template<typename Scalar, int _Options, typename _Index>
1057 template<typename OtherDerived>
1058 EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Options,_Index>::operator=(const SparseMatrixBase<OtherDerived>& other)
1059 {
1060  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
1061  YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
1062 
1063  const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
1064  if (needToTranspose)
1065  {
1066  // two passes algorithm:
1067  // 1 - compute the number of coeffs per dest inner vector
1068  // 2 - do the actual copy/eval
1069  // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
1070  typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
1071  typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
1072  OtherCopy otherCopy(other.derived());
1073 
1074  SparseMatrix dest(other.rows(),other.cols());
1075  Eigen::Map<Matrix<Index, Dynamic, 1> > (dest.m_outerIndex,dest.outerSize()).setZero();
1076 
1077  // pass 1
1078  // FIXME the above copy could be merged with that pass
1079  for (Index j=0; j<otherCopy.outerSize(); ++j)
1080  for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
1081  ++dest.m_outerIndex[it.index()];
1082 
1083  // prefix sum
1084  Index count = 0;
1085  Matrix<Index,Dynamic,1> positions(dest.outerSize());
1086  for (Index j=0; j<dest.outerSize(); ++j)
1087  {
1088  Index tmp = dest.m_outerIndex[j];
1089  dest.m_outerIndex[j] = count;
1090  positions[j] = count;
1091  count += tmp;
1092  }
1093  dest.m_outerIndex[dest.outerSize()] = count;
1094  // alloc
1095  dest.m_data.resize(count);
1096  // pass 2
1097  for (Index j=0; j<otherCopy.outerSize(); ++j)
1098  {
1099  for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
1100  {
1101  Index pos = positions[it.index()]++;
1102  dest.m_data.index(pos) = j;
1103  dest.m_data.value(pos) = it.value();
1104  }
1105  }
1106  this->swap(dest);
1107  return *this;
1108  }
1109  else
1110  {
1111  if(other.isRValue())
1112  initAssignment(other.derived());
1113  // there is no special optimization
1114  return Base::operator=(other.derived());
1115  }
1116 }
1117 
1118 template<typename _Scalar, int _Options, typename _Index>
1119 EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertUncompressed(Index row, Index col)
1120 {
1121  eigen_assert(!isCompressed());
1122 
1123  const Index outer = IsRowMajor ? row : col;
1124  const Index inner = IsRowMajor ? col : row;
1125 
1126  Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
1127  Index innerNNZ = m_innerNonZeros[outer];
1128  if(innerNNZ>=room)
1129  {
1130  // this inner vector is full, we need to reallocate the whole buffer :(
1131  reserve(SingletonVector(outer,std::max<Index>(2,innerNNZ)));
1132  }
1133 
1134  Index startId = m_outerIndex[outer];
1135  Index p = startId + m_innerNonZeros[outer];
1136  while ( (p > startId) && (m_data.index(p-1) > inner) )
1137  {
1138  m_data.index(p) = m_data.index(p-1);
1139  m_data.value(p) = m_data.value(p-1);
1140  --p;
1141  }
1142  eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exist, you must call coeffRef to this end");
1143 
1144  m_innerNonZeros[outer]++;
1145 
1146  m_data.index(p) = inner;
1147  return (m_data.value(p) = 0);
1148 }
1149 
1150 template<typename _Scalar, int _Options, typename _Index>
1151 EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertCompressed(Index row, Index col)
1152 {
1153  eigen_assert(isCompressed());
1154 
1155  const Index outer = IsRowMajor ? row : col;
1156  const Index inner = IsRowMajor ? col : row;
1157 
1158  Index previousOuter = outer;
1159  if (m_outerIndex[outer+1]==0)
1160  {
1161  // we start a new inner vector
1162  while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
1163  {
1164  m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
1165  --previousOuter;
1166  }
1167  m_outerIndex[outer+1] = m_outerIndex[outer];
1168  }
1169 
1170  // here we have to handle the tricky case where the outerIndex array
1171  // starts with: [ 0 0 0 0 0 1 ...] and we are inserting into, e.g.,
1172  // the 2nd inner vector...
1173  bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
1174  && (size_t(m_outerIndex[outer+1]) == m_data.size());
1175 
1176  size_t startId = m_outerIndex[outer];
1177  // FIXME let's make sure sizeof(long int) == sizeof(size_t)
1178  size_t p = m_outerIndex[outer+1];
1179  ++m_outerIndex[outer+1];
1180 
1181  double reallocRatio = 1;
1182  if (m_data.allocatedSize()<=m_data.size())
1183  {
1184  // if there is no preallocated memory, let's reserve a minimum of 32 elements
1185  if (m_data.size()==0)
1186  {
1187  m_data.reserve(32);
1188  }
1189  else
1190  {
1191  // we need to reallocate the data, to reduce multiple reallocations
1192  // we use a smart resize algorithm based on the current filling ratio
1193  // in addition, we use double to avoid integers overflows
1194  double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
1195  reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
1196  // furthermore we bound the realloc ratio to:
1197  // 1) reduce multiple minor realloc when the matrix is almost filled
1198  // 2) avoid allocating too much memory when the matrix is almost empty
1199  reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
1200  }
1201  }
1202  m_data.resize(m_data.size()+1,reallocRatio);
1203 
1204  if (!isLastVec)
1205  {
1206  if (previousOuter==-1)
1207  {
1208  // oops wrong guess.
1209  // let's correct the outer offsets
1210  for (Index k=0; k<=(outer+1); ++k)
1211  m_outerIndex[k] = 0;
1212  Index k=outer+1;
1213  while(m_outerIndex[k]==0)
1214  m_outerIndex[k++] = 1;
1215  while (k<=m_outerSize && m_outerIndex[k]!=0)
1216  m_outerIndex[k++]++;
1217  p = 0;
1218  --k;
1219  k = m_outerIndex[k]-1;
1220  while (k>0)
1221  {
1222  m_data.index(k) = m_data.index(k-1);
1223  m_data.value(k) = m_data.value(k-1);
1224  k--;
1225  }
1226  }
1227  else
1228  {
1229  // we are not inserting into the last inner vec
1230  // update outer indices:
1231  Index j = outer+2;
1232  while (j<=m_outerSize && m_outerIndex[j]!=0)
1233  m_outerIndex[j++]++;
1234  --j;
1235  // shift data of last vecs:
1236  Index k = m_outerIndex[j]-1;
1237  while (k>=Index(p))
1238  {
1239  m_data.index(k) = m_data.index(k-1);
1240  m_data.value(k) = m_data.value(k-1);
1241  k--;
1242  }
1243  }
1244  }
1245 
1246  while ( (p > startId) && (m_data.index(p-1) > inner) )
1247  {
1248  m_data.index(p) = m_data.index(p-1);
1249  m_data.value(p) = m_data.value(p-1);
1250  --p;
1251  }
1252 
1253  m_data.index(p) = inner;
1254  return (m_data.value(p) = 0);
1255 }
1256 
1257 } // end namespace Eigen
1258 
1259 #endif // EIGEN_SPARSEMATRIX_H