#ifndef EIGEN_SPARSEMATRIX_H
#define EIGEN_SPARSEMATRIX_H

namespace Eigen {

namespace internal {

template<typename _Scalar, int _Options, typename _Index>
struct traits<SparseMatrix<_Scalar, _Options, _Index> >
{
  typedef _Scalar Scalar;
  typedef _Index Index;
  typedef Sparse StorageKind;
  typedef MatrixXpr XprKind;
  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = Dynamic,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = Dynamic,
    Flags = _Options | NestByRefBit | LvalueBit,
    CoeffReadCost = NumTraits<Scalar>::ReadCost,
    SupportedAccessPatterns = InnerRandomAccessPattern
  };
};

template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
{
  typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType;
  typedef typename nested<MatrixType>::type MatrixTypeNested;
  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;

  typedef _Scalar Scalar;
  typedef Dense StorageKind;
  typedef _Index Index;
  typedef MatrixXpr XprKind;

  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = 1,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = 1,
    Flags = 0,
    CoeffReadCost = _MatrixTypeNested::CoeffReadCost*10
  };
};

} // end namespace internal

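/** \class SparseMatrix
  *
  * \brief A versatile sparse matrix representation.
  *
  * Coefficients are stored in Compressed Column Storage (or Compressed Row Storage when
  * _Options is RowMajor). Random insertions may temporarily leave the matrix in an
  * "uncompressed" mode where each inner vector keeps some free room; makeCompressed()
  * returns it to the standard compressed format.
  *
  * \tparam _Scalar  the scalar type of the coefficients
  * \tparam _Options either \c ColMajor (the default) or \c RowMajor
  * \tparam _Index   the type of the indices (must be a signed integer type)
  */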
template<typename _Scalar, int _Options, typename _Index>
class SparseMatrix
  : public SparseMatrixBase<SparseMatrix<_Scalar, _Options, _Index> >
{
  public:
    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=)
    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=)

    typedef MappedSparseMatrix<Scalar,Flags> Map;
    using Base::IsRowMajor;
    typedef internal::CompressedStorage<Scalar,Index> Storage;
    enum {
      Options = _Options
    };

  protected:

    typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;

    Index m_outerSize;
    Index m_innerSize;
    Index* m_outerIndex;
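    // optional: when non null the matrix is in "uncompressed" mode and m_innerNonZeros[j]
    // holds the number of stored non zeros of the j-th inner vector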
    Index* m_innerNonZeros;
    Storage m_data;

    Eigen::Map<Matrix<Index,Dynamic,1> > innerNonZeros() { return Eigen::Map<Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
    const Eigen::Map<const Matrix<Index,Dynamic,1> > innerNonZeros() const { return Eigen::Map<const Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }

  public:

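    /** \returns whether *this is in the standard compressed form, i.e. no free room is left
      * between the inner vectors */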
    inline bool isCompressed() const { return m_innerNonZeros==0; }

    inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
    inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }

    inline Index innerSize() const { return m_innerSize; }
    inline Index outerSize() const { return m_outerSize; }

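    /** Low-level access to the underlying storage: valuePtr() and innerIndexPtr() return the arrays
      * of coefficient values and inner indices, outerIndexPtr() returns the outerSize()+1 positions
      * delimiting the inner vectors, and innerNonZeroPtr() returns the per inner vector counts
      * (null when the matrix is compressed). */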
    inline const Scalar* valuePtr() const { return &m_data.value(0); }
    inline Scalar* valuePtr() { return &m_data.value(0); }

    inline const Index* innerIndexPtr() const { return &m_data.index(0); }
    inline Index* innerIndexPtr() { return &m_data.index(0); }

    inline const Index* outerIndexPtr() const { return m_outerIndex; }
    inline Index* outerIndexPtr() { return m_outerIndex; }

    inline const Index* innerNonZeroPtr() const { return m_innerNonZeros; }
    inline Index* innerNonZeroPtr() { return m_innerNonZeros; }

    inline Storage& data() { return m_data; }
    inline const Storage& data() const { return m_data; }

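    /** \returns the value at (\a row, \a col), or an explicit zero if no coefficient is stored there */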
    inline Scalar coeff(Index row, Index col) const
    {
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;
      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
      return m_data.atInRange(m_outerIndex[outer], end, inner);
    }

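    /** \returns a non-const reference to the coefficient at (\a row, \a col); if the coefficient
      * does not exist yet it is inserted via insert(), which can be expensive. */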
    inline Scalar& coeffRef(Index row, Index col)
    {
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      Index start = m_outerIndex[outer];
      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
      eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
      if(end<=start)
        return insert(row,col);
      const Index p = m_data.searchLowerIndex(start,end-1,inner);
      if((p<end) && (m_data.index(p)==inner))
        return m_data.value(p);
      else
        return insert(row,col);
    }

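    /** Inserts a new zero-valued coefficient at (\a row, \a col) and returns a reference to it;
      * the coefficient must not already exist. If the matrix is compressed, it is first switched
      * to uncompressed mode with a default reserve of 2 entries per inner vector. */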
    EIGEN_DONT_INLINE Scalar& insert(Index row, Index col)
    {
      if(isCompressed())
      {
        reserve(VectorXi::Constant(outerSize(), 2));
      }
      return insertUncompressed(row,col);
    }

  public:

    class InnerIterator;
    class ReverseInnerIterator;

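    /** Removes all non zeros but keeps the currently allocated memory */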
    inline void setZero()
    {
      m_data.clear();
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
      if(m_innerNonZeros)
        memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(Index));
    }

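    /** \returns the number of stored non zero coefficients */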
    inline Index nonZeros() const
    {
      if(m_innerNonZeros)
        return innerNonZeros().sum();
      return static_cast<Index>(m_data.size());
    }

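    /** Preallocates room for \a reserveSize non zeros; the matrix must be in compressed mode. */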
    inline void reserve(Index reserveSize)
    {
      eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
      m_data.reserve(reserveSize);
    }

    #ifdef EIGEN_PARSED_BY_DOXYGEN
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes);
    #else
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif = typename SizesType::value_type())
    {
      EIGEN_UNUSED_VARIABLE(enableif);
      reserveInnerVectors(reserveSizes);
    }
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes, const typename SizesType::Scalar& enableif = typename SizesType::Scalar())
    {
      EIGEN_UNUSED_VARIABLE(enableif);
      reserveInnerVectors(reserveSizes);
    }
    #endif // EIGEN_PARSED_BY_DOXYGEN

  protected:
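    /** \internal Reserves reserveSizes[j] additional entries for each inner vector j;
      * if the matrix is compressed it is switched to uncompressed mode first. */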
    template<class SizesType>
    inline void reserveInnerVectors(const SizesType& reserveSizes)
    {
      if(isCompressed())
      {
        std::size_t totalReserveSize = 0;
        // turn the matrix into non-compressed mode
        m_innerNonZeros = new Index[m_outerSize];

        // temporarily use m_innerNonZeros to hold the new starting points
        Index* newOuterIndex = m_innerNonZeros;

        Index count = 0;
        for(Index j=0; j<m_outerSize; ++j)
        {
          newOuterIndex[j] = count;
          count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
          totalReserveSize += reserveSizes[j];
        }
        m_data.reserve(totalReserveSize);
        std::ptrdiff_t previousOuterIndex = m_outerIndex[m_outerSize];
        for(std::ptrdiff_t j=m_outerSize-1; j>=0; --j)
        {
          std::ptrdiff_t innerNNZ = previousOuterIndex - m_outerIndex[j];
          for(std::ptrdiff_t i=innerNNZ-1; i>=0; --i)
          {
            m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
            m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
          }
          previousOuterIndex = m_outerIndex[j];
          m_outerIndex[j] = newOuterIndex[j];
          m_innerNonZeros[j] = innerNNZ;
        }
        m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];

        m_data.resize(m_outerIndex[m_outerSize]);
      }
      else
      {
        Index* newOuterIndex = new Index[m_outerSize+1];
        Index count = 0;
        for(Index j=0; j<m_outerSize; ++j)
        {
          newOuterIndex[j] = count;
          Index alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
          Index toReserve = std::max<std::ptrdiff_t>(reserveSizes[j], alreadyReserved);
          count += toReserve + m_innerNonZeros[j];
        }
        newOuterIndex[m_outerSize] = count;

        m_data.resize(count);
        for(std::ptrdiff_t j=m_outerSize-1; j>=0; --j)
        {
          std::ptrdiff_t offset = newOuterIndex[j] - m_outerIndex[j];
          if(offset>0)
          {
            std::ptrdiff_t innerNNZ = m_innerNonZeros[j];
            for(std::ptrdiff_t i=innerNNZ-1; i>=0; --i)
            {
              m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
              m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
            }
          }
        }

        std::swap(m_outerIndex, newOuterIndex);
        delete[] newOuterIndex;
      }
    }

  public:

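    /** Low-level "coherent" filling API: appends a non zero at the end of the inner vector
      * containing (\a row, \a col). Within each inner vector, entries must be appended in
      * increasing inner index order. \sa startVec(), finalize() */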
    inline Scalar& insertBack(Index row, Index col)
    {
      return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
    }

    inline Scalar& insertBackByOuterInner(Index outer, Index inner)
    {
      eigen_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
      eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];
      m_data.append(0, inner);
      return m_data.value(p);
    }

    inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
    {
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];
      m_data.append(0, inner);
      return m_data.value(p);
    }

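    /** Starts the inner vector \a outer when filling the matrix with the low-level
      * insertBack() API; inner vectors must be started sequentially. */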
    inline void startVec(Index outer)
    {
      eigen_assert(m_outerIndex[outer]==int(m_data.size()) && "You must call startVec for each inner vector sequentially");
      eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
      m_outerIndex[outer+1] = m_outerIndex[outer];
    }

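    /** Must be called once the low-level filling (startVec()/insertBack()) is done:
      * completes the remaining entries of the outer index array. */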
    inline void finalize()
    {
      if(isCompressed())
      {
        Index size = static_cast<Index>(m_data.size());
        Index i = m_outerSize;
        // find the last filled inner vector
        while (i>=0 && m_outerIndex[i]==0)
          --i;
        ++i;
        while (i<=m_outerSize)
        {
          m_outerIndex[i] = size;
          ++i;
        }
      }
    }

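    /** Fills *this from the triplet list defined by the iterator range \a begin - \a end;
      * duplicated entries are summed up. \sa sumupDuplicates() */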
    template<typename InputIterators>
    void setFromTriplets(const InputIterators& begin, const InputIterators& end);

    void sumupDuplicates();

    EIGEN_DONT_INLINE Scalar& insertByOuterInner(Index j, Index i)
    {
      return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
    }

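    /** Turns the matrix into the standard compressed storage format, removing the extra room
      * left between inner vectors by uncompressed insertions. */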
    void makeCompressed()
    {
      if(isCompressed())
        return;

      Index oldStart = m_outerIndex[1];
      m_outerIndex[1] = m_innerNonZeros[0];
      for(Index j=1; j<m_outerSize; ++j)
      {
        Index nextOldStart = m_outerIndex[j+1];
        std::ptrdiff_t offset = oldStart - m_outerIndex[j];
        if(offset>0)
        {
          for(Index k=0; k<m_innerNonZeros[j]; ++k)
          {
            m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
            m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
          }
        }
        m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
        oldStart = nextOldStart;
      }
      delete[] m_innerNonZeros;
      m_innerNonZeros = 0;
      m_data.resize(m_outerIndex[m_outerSize]);
      m_data.squeeze();
    }

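    /** Removes all non zeros that are much smaller than \a reference under the tolerance \a epsilon
      * \sa prune(const KeepFunc&) */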
    void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
    {
      prune(default_prunning_func(reference,epsilon));
    }

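    /** Turns the matrix into compressed form and removes every non zero for which the functor
      * \a keep, called as keep(row, col, value), returns false. */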
    template<typename KeepFunc>
    void prune(const KeepFunc& keep = KeepFunc())
    {
      makeCompressed();

      Index k = 0;
      for(Index j=0; j<m_outerSize; ++j)
      {
        Index previousStart = m_outerIndex[j];
        m_outerIndex[j] = k;
        Index end = m_outerIndex[j+1];
        for(Index i=previousStart; i<end; ++i)
        {
          if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
          {
            m_data.value(k) = m_data.value(i);
            m_data.index(k) = m_data.index(i);
            ++k;
          }
        }
      }
      m_outerIndex[m_outerSize] = k;
      m_data.resize(k,0);
    }

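    /** Resizes the matrix to \a rows \c x \a cols and removes all existing non zeros */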
    void resize(Index rows, Index cols)
    {
      const Index outerSize = IsRowMajor ? rows : cols;
      m_innerSize = IsRowMajor ? cols : rows;
      m_data.clear();
      if (m_outerSize != outerSize || m_outerSize==0)
      {
        delete[] m_outerIndex;
        m_outerIndex = new Index [outerSize+1];
        m_outerSize = outerSize;
      }
      if(m_innerNonZeros)
      {
        delete[] m_innerNonZeros;
        m_innerNonZeros = 0;
      }
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
    }

    void resizeNonZeros(Index size)
    {
      m_data.resize(size);
    }

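    /** \returns a const expression of the diagonal coefficients */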
    const Diagonal<const SparseMatrix> diagonal() const { return *this; }

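    /** Default constructor yielding an empty \c 0 \c x \c 0 matrix */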
    inline SparseMatrix()
      : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      resize(0, 0);
    }

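    /** Constructs a \a rows \c x \a cols empty matrix */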
    inline SparseMatrix(Index rows, Index cols)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      resize(rows, cols);
    }

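    /** Constructs a sparse matrix from the sparse expression \a other */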
    template<typename OtherDerived>
    inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      *this = other.derived();
    }

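    /** Copy constructor (performs a deep copy) */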
    inline SparseMatrix(const SparseMatrix& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      *this = other.derived();
    }

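    /** Swaps the content of two sparse matrices of the same type; this is a fast O(1) operation. */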
    inline void swap(SparseMatrix& other)
    {
      std::swap(m_outerIndex, other.m_outerIndex);
      std::swap(m_innerSize, other.m_innerSize);
      std::swap(m_outerSize, other.m_outerSize);
      std::swap(m_innerNonZeros, other.m_innerNonZeros);
      m_data.swap(other.m_data);
    }

    inline SparseMatrix& operator=(const SparseMatrix& other)
    {
      if (other.isRValue())
      {
        swap(other.const_cast_derived());
      }
      else
      {
        initAssignment(other);
        if(other.isCompressed())
        {
          memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(Index));
          m_data = other.m_data;
        }
        else
        {
          Base::operator=(other);
        }
      }
      return *this;
    }

    #ifndef EIGEN_PARSED_BY_DOXYGEN
    template<typename Lhs, typename Rhs>
    inline SparseMatrix& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
    { return Base::operator=(product); }

    template<typename OtherDerived>
    inline SparseMatrix& operator=(const ReturnByValue<OtherDerived>& other)
    { return Base::operator=(other.derived()); }

    template<typename OtherDerived>
    inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
    { return Base::operator=(other.derived()); }
    #endif

    template<typename OtherDerived>
    EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other)
    {
      initAssignment(other.derived());
      const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
      if (needToTranspose)
      {
        // two pass algorithm:
        //  1 - compute the number of coefficients per destination inner vector
        //  2 - do the actual copy/eval
        // since each coefficient of the rhs has to be evaluated twice, evaluate it if needed
        typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
        typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
        OtherCopy otherCopy(other.derived());

        Eigen::Map<Matrix<Index, Dynamic, 1> > (m_outerIndex,outerSize()).setZero();

        // pass 1: count the number of coefficients per destination inner vector
        for (Index j=0; j<otherCopy.outerSize(); ++j)
          for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
            ++m_outerIndex[it.index()];

        // prefix sum
        Index count = 0;
        VectorXi positions(outerSize());
        for (Index j=0; j<outerSize(); ++j)
        {
          Index tmp = m_outerIndex[j];
          m_outerIndex[j] = count;
          positions[j] = count;
          count += tmp;
        }
        m_outerIndex[outerSize()] = count;

        m_data.resize(count);

        // pass 2: copy the coefficients
        for (Index j=0; j<otherCopy.outerSize(); ++j)
        {
          for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
          {
            Index pos = positions[it.index()]++;
            m_data.index(pos) = j;
            m_data.value(pos) = it.value();
          }
        }
        return *this;
      }
      else
      {
        // same storage order: straightforward evaluation
        return Base::operator=(other.derived());
      }
    }

    friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
    {
      EIGEN_DBG_SPARSE(
        s << "Nonzero entries:\n";
        if(m.isCompressed())
          for (Index i=0; i<m.nonZeros(); ++i)
            s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
        else
          for (Index i=0; i<m.outerSize(); ++i)
          {
            int p = m.m_outerIndex[i];
            int pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
            Index k=p;
            for (; k<pe; ++k)
              s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
            for (; k<m.m_outerIndex[i+1]; ++k)
              s << "(_,_) ";
          }
        s << std::endl;
        s << std::endl;
        s << "Outer pointers:\n";
        for (Index i=0; i<m.outerSize(); ++i)
          s << m.m_outerIndex[i] << " ";
        s << " $" << std::endl;
        if(!m.isCompressed())
        {
          s << "Inner non zeros:\n";
          for (Index i=0; i<m.outerSize(); ++i)
            s << m.m_innerNonZeros[i] << " ";
          s << " $" << std::endl;
        }
        s << std::endl;
      );
      s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
      return s;
    }

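    /** Destructor: frees the outer index and inner non zero arrays */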
    inline ~SparseMatrix()
    {
      delete[] m_outerIndex;
      delete[] m_innerNonZeros;
    }

    #ifndef EIGEN_PARSED_BY_DOXYGEN
    /** Overloaded for performance */
    Scalar sum() const;
    #endif

    #ifdef EIGEN_SPARSEMATRIX_PLUGIN
    #  include EIGEN_SPARSEMATRIX_PLUGIN
    #endif

  protected:

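    /** \internal Resizes *this to the dimensions of \a other and switches back to compressed mode. */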
    template<typename Other>
    void initAssignment(const Other& other)
    {
      resize(other.rows(), other.cols());
      if(m_innerNonZeros)
      {
        delete[] m_innerNonZeros;
        m_innerNonZeros = 0;
      }
    }

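    /** \internal Inserts a new non zero at (row, col) while keeping the matrix compressed;
      * this may require shifting all the data stored after the insertion point. */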
    EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col)
    {
      eigen_assert(isCompressed());

      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      Index previousOuter = outer;
      if (m_outerIndex[outer+1]==0)
      {
        // we start a new inner vector: mark all preceding empty outer positions
        while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
        {
          m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
          --previousOuter;
        }
        m_outerIndex[outer+1] = m_outerIndex[outer];
      }

      // handle the tricky case where the outer index array starts with [ 0 0 0 0 0 1 ...]
      // while we are inserting into one of the leading inner vectors
      bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
                    && (size_t(m_outerIndex[outer+1]) == m_data.size());

      size_t startId = m_outerIndex[outer];

      size_t p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];

      float reallocRatio = 1;
      if (m_data.allocatedSize()<=m_data.size())
      {
        // if there is no preallocated memory, reserve a minimum of 32 elements
        if (m_data.size()==0)
        {
          m_data.reserve(32);
        }
        else
        {
          // we need to reallocate the data: to limit the number of reallocations,
          // estimate the final size from the current filling ratio
          // (floats are used to avoid integer overflow)
          float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer+1);
          reallocRatio = (nnzEstimate-float(m_data.size()))/float(m_data.size());

          // bound the realloc ratio to avoid both many small reallocations when the matrix
          // is almost filled, and over-allocation when it is almost empty
          reallocRatio = (std::min)((std::max)(reallocRatio,1.5f),8.f);
        }
      }
      m_data.resize(m_data.size()+1,reallocRatio);

      if (!isLastVec)
      {
        if (previousOuter==-1)
        {
          // the initial guess was wrong: correct the outer offsets
          for (Index k=0; k<=(outer+1); ++k)
            m_outerIndex[k] = 0;
          Index k=outer+1;
          while(m_outerIndex[k]==0)
            m_outerIndex[k++] = 1;
          while (k<=m_outerSize && m_outerIndex[k]!=0)
            m_outerIndex[k++]++;
          p = 0;
          --k;
          k = m_outerIndex[k]-1;
          while (k>0)
          {
            m_data.index(k) = m_data.index(k-1);
            m_data.value(k) = m_data.value(k-1);
            k--;
          }
        }
        else
        {
          // we are not inserting into the last inner vector:
          // update the following outer offsets and shift the data accordingly
          Index j = outer+2;
          while (j<=m_outerSize && m_outerIndex[j]!=0)
            m_outerIndex[j++]++;
          --j;
          // shift the data of the trailing inner vectors
          Index k = m_outerIndex[j]-1;
          while (k>=Index(p))
          {
            m_data.index(k) = m_data.index(k-1);
            m_data.value(k) = m_data.value(k-1);
            k--;
          }
        }
      }

      // finally, shift the elements of the target inner vector to keep it sorted
      while ( (p > startId) && (m_data.index(p-1) > inner) )
      {
        m_data.index(p) = m_data.index(p-1);
        m_data.value(p) = m_data.value(p-1);
        --p;
      }

      m_data.index(p) = inner;
      return (m_data.value(p) = 0);
    }

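    /** \internal Lightweight vector-like helper which is zero everywhere except at a single index;
      * used by insertUncompressed() to reserve room for one inner vector only. */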
    class SingletonVector
    {
        Index m_index;
        Index m_value;
      public:
        typedef Index value_type;
        SingletonVector(Index i, Index v)
          : m_index(i), m_value(v)
        {}

        Index operator[](Index i) const { return i==m_index ? m_value : 0; }
    };

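    /** \internal Inserts a new non zero at (row, col) assuming the matrix is uncompressed;
      * the target inner vector is grown via reserve() if it has no free room left. */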
    EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col)
    {
      eigen_assert(!isCompressed());

      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      std::ptrdiff_t room = m_outerIndex[outer+1] - m_outerIndex[outer];
      std::ptrdiff_t innerNNZ = m_innerNonZeros[outer];
      if(innerNNZ>=room)
      {
        // this inner vector is full, we need to reallocate the whole buffer
        reserve(SingletonVector(outer,std::max<std::ptrdiff_t>(2,innerNNZ)));
      }

      Index startId = m_outerIndex[outer];
      Index p = startId + m_innerNonZeros[outer];
      while ( (p > startId) && (m_data.index(p-1) > inner) )
      {
        m_data.index(p) = m_data.index(p-1);
        m_data.value(p) = m_data.value(p-1);
        --p;
      }

      m_innerNonZeros[outer]++;

      m_data.index(p) = inner;
      return (m_data.value(p) = 0);
    }

  public:
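    /** \internal Appends a new non zero at the end of the target inner vector of an uncompressed
      * matrix; enough room must have been reserved beforehand. */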
    inline Scalar& insertBackUncompressed(Index row, Index col)
    {
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      eigen_assert(!isCompressed());
      eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));

      Index p = m_outerIndex[outer] + m_innerNonZeros[outer];
      m_innerNonZeros[outer]++;
      m_data.index(p) = inner;
      return (m_data.value(p) = 0);
    }

  private:
    static void check_template_parameters()
    {
      EIGEN_STATIC_ASSERT(NumTraits<Index>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
    }

    struct default_prunning_func {
      default_prunning_func(Scalar ref, RealScalar eps) : reference(ref), epsilon(eps) {}
      inline bool operator() (const Index&, const Index&, const Scalar& value) const
      {
        return !internal::isMuchSmallerThan(value, reference, epsilon);
      }
      Scalar reference;
      RealScalar epsilon;
    };
};

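/** Iterator over the non zero coefficients of a given inner vector
  * (a column for a column-major matrix, a row otherwise). */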
template<typename Scalar, int _Options, typename _Index>
class SparseMatrix<Scalar,_Options,_Index>::InnerIterator
{
  public:
    InnerIterator(const SparseMatrix& mat, Index outer)
      : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_id(mat.m_outerIndex[outer])
    {
      if(mat.isCompressed())
        m_end = mat.m_outerIndex[outer+1];
      else
        m_end = m_id + mat.m_innerNonZeros[outer];
    }

    inline InnerIterator& operator++() { m_id++; return *this; }

    inline const Scalar& value() const { return m_values[m_id]; }
    inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }

    inline Index index() const { return m_indices[m_id]; }
    inline Index outer() const { return m_outer; }
    inline Index row() const { return IsRowMajor ? m_outer : index(); }
    inline Index col() const { return IsRowMajor ? index() : m_outer; }

    inline operator bool() const { return (m_id < m_end); }

  protected:
    const Scalar* m_values;
    const Index* m_indices;
    const Index m_outer;
    Index m_id;
    Index m_end;
};

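/** Iterator over the non zero coefficients of a given inner vector, in reverse order. */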
template<typename Scalar, int _Options, typename _Index>
class SparseMatrix<Scalar,_Options,_Index>::ReverseInnerIterator
{
  public:
    ReverseInnerIterator(const SparseMatrix& mat, Index outer)
      : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_start(mat.m_outerIndex[outer])
    {
      if(mat.isCompressed())
        m_id = mat.m_outerIndex[outer+1];
      else
        m_id = m_start + mat.m_innerNonZeros[outer];
    }

    inline ReverseInnerIterator& operator--() { --m_id; return *this; }

    inline const Scalar& value() const { return m_values[m_id-1]; }
    inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id-1]); }

    inline Index index() const { return m_indices[m_id-1]; }
    inline Index outer() const { return m_outer; }
    inline Index row() const { return IsRowMajor ? m_outer : index(); }
    inline Index col() const { return IsRowMajor ? index() : m_outer; }

    inline operator bool() const { return (m_id > m_start); }

  protected:
    const Scalar* m_values;
    const Index* m_indices;
    const Index m_outer;
    Index m_id;
    const Index m_start;
};

namespace internal {

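/** \internal Shared implementation of SparseMatrix::setFromTriplets(): the triplets are first
  * collected into a temporary matrix of the opposite storage order, duplicates are summed up,
  * and the final assignment to \a mat performs the transposed (and thus sorted) copy. */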
template<typename InputIterator, typename SparseMatrixType>
void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, int Options = 0)
{
  EIGEN_UNUSED_VARIABLE(Options);
  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
  typedef typename SparseMatrixType::Scalar Scalar;
  typedef typename SparseMatrixType::Index Index;
  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor> trMat(mat.rows(),mat.cols());

  // pass 1: count the number of non zeros per inner vector
  VectorXi wi(trMat.outerSize());
  wi.setZero();
  for(InputIterator it(begin); it!=end; ++it)
    wi(IsRowMajor ? it->col() : it->row())++;

  // pass 2: insert all the elements into trMat
  trMat.reserve(wi);
  for(InputIterator it(begin); it!=end; ++it)
    trMat.insertBackUncompressed(it->row(),it->col()) = it->value();

  // pass 3: collapse duplicated entries
  trMat.sumupDuplicates();

  // pass 4: the transposed copy performs an implicit sorting
  mat = trMat;
}

} // end namespace internal

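/** Fills \c *this from the triplet list defined by the iterator range \a begin - \a end.
  *
  * A triplet is a small structure holding a row index, a column index, and a value, such as
  * Triplet<Scalar>; the value_type of the iterators only needs to provide row(), col() and
  * value() members. The matrix must already have the proper size (e.g. via resize()), its
  * previous content is overwritten, and duplicated triplets are summed up.
  *
  * Typical usage:
  * \code
  *   typedef Eigen::Triplet<double> T;
  *   std::vector<T> tripletList;
  *   tripletList.reserve(estimated_number_of_entries);
  *   // ... push_back T(i,j,v_ij) for every non zero ...
  *   Eigen::SparseMatrix<double> m(rows,cols);
  *   m.setFromTriplets(tripletList.begin(), tripletList.end());
  *   // m is ready to use
  * \endcode
  */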
template<typename Scalar, int _Options, typename _Index>
template<typename InputIterators>
void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
{
  internal::set_from_triplets(begin, end, *this);
}

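/** \internal Collapses duplicated entries (summing their values) and turns the matrix into compressed mode. */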
template<typename Scalar, int _Options, typename _Index>
void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
{
  eigen_assert(!isCompressed());
  // for each inner vector, wi(inner_index) holds the position in the buffers of the first
  // element met with that inner index in the current outer vector, or -1 if not met yet
  VectorXi wi(innerSize());
  wi.fill(-1);
  Index count = 0;

  for(int j=0; j<outerSize(); ++j)
  {
    Index start = count;
    Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
    for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
    {
      Index i = m_data.index(k);
      if(wi(i)>=start)
      {
        // entry already met in this outer vector => accumulate it
        m_data.value(wi(i)) += m_data.value(k);
      }
      else
      {
        m_data.value(count) = m_data.value(k);
        m_data.index(count) = m_data.index(k);
        wi(i) = count;
        ++count;
      }
    }
    m_outerIndex[j] = start;
  }
  m_outerIndex[m_outerSize] = count;

  // turn the matrix into compressed form
  delete[] m_innerNonZeros;
  m_innerNonZeros = 0;
  m_data.resize(m_outerIndex[m_outerSize]);
}

} // end namespace Eigen

#endif // EIGEN_SPARSEMATRIX_H