1 | // This file is part of Eigen, a lightweight C++ template library |
2 | // for linear algebra. |
3 | // |
4 | // Copyright (C) 2008-2014 Gael Guennebaud <[email protected]> |
5 | // |
6 | // This Source Code Form is subject to the terms of the Mozilla |
7 | // Public License v. 2.0. If a copy of the MPL was not distributed |
8 | // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. |
9 | |
10 | #ifndef EIGEN_SPARSEMATRIX_H |
11 | #define EIGEN_SPARSEMATRIX_H |
12 | |
13 | namespace Eigen { |
14 | |
15 | /** \ingroup SparseCore_Module |
16 | * |
17 | * \class SparseMatrix |
18 | * |
  * \brief A versatile sparse matrix representation
20 | * |
  * This class implements a more versatile variant of the common \em compressed row/column storage format.
  * Each column's (resp. row's) non-zeros are stored as pairs of value and associated row (resp. column) index.
  * All the non-zeros are stored in a single large buffer. Unlike the \em compressed format, there might be extra
  * space in between the non-zeros of two successive columns (resp. rows) such that insertion of new non-zeros
  * can be done with limited memory reallocation and copies.
26 | * |
  * A call to the function makeCompressed() turns the matrix into the standard \em compressed format
  * compatible with many libraries.
29 | * |
  * More details on this storage scheme are given in the \ref TutorialSparse "manual pages".
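  *
  * A minimal usage sketch (the sizes and the per-column estimate below are arbitrary placeholders):
  * \code
  * SparseMatrix<double> mat(1000,1000);      // default storage is column-major
  * mat.reserve(VectorXi::Constant(1000,6));  // rough estimate: ~6 non-zeros per column
  * mat.insert(0,0) = 1.0;                    // insert a new non-zero coefficient
  * mat.coeffRef(0,0) += 1.0;                 // update an existing coefficient
  * mat.makeCompressed();                     // switch to the standard compressed format
  * \endcode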
31 | * |
32 | * \tparam _Scalar the scalar type, i.e. the type of the coefficients |
  * \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibilities
  *                  are ColMajor or RowMajor. The default is 0 which means column-major.
35 | * \tparam _StorageIndex the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int. |
36 | * |
37 | * \warning In %Eigen 3.2, the undocumented type \c SparseMatrix::Index was improperly defined as the storage index type (e.g., int), |
38 | * whereas it is now (starting from %Eigen 3.3) deprecated and always defined as Eigen::Index. |
  * Code making use of \c SparseMatrix::Index will thus likely have to be changed to use \c SparseMatrix::StorageIndex instead.
40 | * |
41 | * This class can be extended with the help of the plugin mechanism described on the page |
42 | * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN. |
43 | */ |
44 | |
45 | namespace internal { |
46 | template<typename _Scalar, int _Options, typename _StorageIndex> |
47 | struct traits<SparseMatrix<_Scalar, _Options, _StorageIndex> > |
48 | { |
49 | typedef _Scalar Scalar; |
50 | typedef _StorageIndex StorageIndex; |
51 | typedef Sparse StorageKind; |
52 | typedef MatrixXpr XprKind; |
53 | enum { |
54 | RowsAtCompileTime = Dynamic, |
55 | ColsAtCompileTime = Dynamic, |
56 | MaxRowsAtCompileTime = Dynamic, |
57 | MaxColsAtCompileTime = Dynamic, |
58 | Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit, |
59 | SupportedAccessPatterns = InnerRandomAccessPattern |
60 | }; |
61 | }; |
62 | |
63 | template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex> |
64 | struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> > |
65 | { |
66 | typedef SparseMatrix<_Scalar, _Options, _StorageIndex> MatrixType; |
67 | typedef typename ref_selector<MatrixType>::type MatrixTypeNested; |
68 | typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested; |
69 | |
70 | typedef _Scalar Scalar; |
71 | typedef Dense StorageKind; |
72 | typedef _StorageIndex StorageIndex; |
73 | typedef MatrixXpr XprKind; |
74 | |
75 | enum { |
76 | RowsAtCompileTime = Dynamic, |
77 | ColsAtCompileTime = 1, |
78 | MaxRowsAtCompileTime = Dynamic, |
79 | MaxColsAtCompileTime = 1, |
80 | Flags = LvalueBit |
81 | }; |
82 | }; |
83 | |
84 | template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex> |
85 | struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> > |
86 | : public traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> > |
87 | { |
88 | enum { |
89 | Flags = 0 |
90 | }; |
91 | }; |
92 | |
93 | } // end namespace internal |
94 | |
95 | template<typename _Scalar, int _Options, typename _StorageIndex> |
96 | class SparseMatrix |
97 | : public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _StorageIndex> > |
98 | { |
99 | typedef SparseCompressedBase<SparseMatrix> Base; |
100 | using Base::convert_index; |
101 | friend class SparseVector<_Scalar,0,_StorageIndex>; |
102 | public: |
103 | using Base::isCompressed; |
104 | using Base::nonZeros; |
105 | EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix) |
106 | using Base::operator+=; |
107 | using Base::operator-=; |
108 | |
109 | typedef MappedSparseMatrix<Scalar,Flags> Map; |
110 | typedef Diagonal<SparseMatrix> DiagonalReturnType; |
111 | typedef Diagonal<const SparseMatrix> ConstDiagonalReturnType; |
112 | typedef typename Base::InnerIterator InnerIterator; |
113 | typedef typename Base::ReverseInnerIterator ReverseInnerIterator; |
114 | |
115 | |
116 | using Base::IsRowMajor; |
117 | typedef internal::CompressedStorage<Scalar,StorageIndex> Storage; |
118 | enum { |
119 | Options = _Options |
120 | }; |
121 | |
122 | typedef typename Base::IndexVector IndexVector; |
123 | typedef typename Base::ScalarVector ScalarVector; |
124 | protected: |
125 | typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix; |
126 | |
127 | Index m_outerSize; |
128 | Index m_innerSize; |
129 | StorageIndex* m_outerIndex; |
130 | StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed |
131 | Storage m_data; |
132 | |
133 | public: |
134 | |
135 | /** \returns the number of rows of the matrix */ |
136 | inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; } |
137 | /** \returns the number of columns of the matrix */ |
138 | inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; } |
139 | |
    /** \returns the number of rows (resp. columns) of the matrix if the storage order is column major (resp. row major) */
    inline Index innerSize() const { return m_innerSize; }
    /** \returns the number of columns (resp. rows) of the matrix if the storage order is column major (resp. row major) */
143 | inline Index outerSize() const { return m_outerSize; } |
144 | |
145 | /** \returns a const pointer to the array of values. |
146 | * This function is aimed at interoperability with other libraries. |
147 | * \sa innerIndexPtr(), outerIndexPtr() */ |
148 | inline const Scalar* valuePtr() const { return m_data.valuePtr(); } |
149 | /** \returns a non-const pointer to the array of values. |
150 | * This function is aimed at interoperability with other libraries. |
151 | * \sa innerIndexPtr(), outerIndexPtr() */ |
152 | inline Scalar* valuePtr() { return m_data.valuePtr(); } |
153 | |
154 | /** \returns a const pointer to the array of inner indices. |
155 | * This function is aimed at interoperability with other libraries. |
156 | * \sa valuePtr(), outerIndexPtr() */ |
157 | inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); } |
158 | /** \returns a non-const pointer to the array of inner indices. |
159 | * This function is aimed at interoperability with other libraries. |
160 | * \sa valuePtr(), outerIndexPtr() */ |
161 | inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); } |
162 | |
163 | /** \returns a const pointer to the array of the starting positions of the inner vectors. |
164 | * This function is aimed at interoperability with other libraries. |
165 | * \sa valuePtr(), innerIndexPtr() */ |
166 | inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; } |
167 | /** \returns a non-const pointer to the array of the starting positions of the inner vectors. |
168 | * This function is aimed at interoperability with other libraries. |
169 | * \sa valuePtr(), innerIndexPtr() */ |
170 | inline StorageIndex* outerIndexPtr() { return m_outerIndex; } |
171 | |
172 | /** \returns a const pointer to the array of the number of non zeros of the inner vectors. |
173 | * This function is aimed at interoperability with other libraries. |
174 | * \warning it returns the null pointer 0 in compressed mode */ |
175 | inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; } |
176 | /** \returns a non-const pointer to the array of the number of non zeros of the inner vectors. |
177 | * This function is aimed at interoperability with other libraries. |
178 | * \warning it returns the null pointer 0 in compressed mode */ |
179 | inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; } |
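
    // Note: in compressed mode, valuePtr(), innerIndexPtr() and outerIndexPtr() expose the
    // classic CSC (resp. CSR) arrays. A traversal sketch over these raw buffers, where
    // "process" stands for arbitrary user code:
    //   for(Index j=0; j<mat.outerSize(); ++j)
    //     for(Index p=mat.outerIndexPtr()[j]; p<mat.outerIndexPtr()[j+1]; ++p)
    //       process(mat.innerIndexPtr()[p], j, mat.valuePtr()[p]); // (inner, outer, value)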
180 | |
181 | /** \internal */ |
182 | inline Storage& data() { return m_data; } |
183 | /** \internal */ |
184 | inline const Storage& data() const { return m_data; } |
185 | |
    /** \returns the value of the matrix at position \a row, \a col
      * This function returns Scalar(0) if the element is not stored (and likewise for an explicit \em zero) */
188 | inline Scalar coeff(Index row, Index col) const |
189 | { |
190 | eigen_assert(row>=0 && row<rows() && col>=0 && col<cols()); |
191 | |
192 | const Index outer = IsRowMajor ? row : col; |
193 | const Index inner = IsRowMajor ? col : row; |
194 | Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1]; |
195 | return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner)); |
196 | } |
197 | |
    /** \returns a non-const reference to the value of the matrix at position \a row, \a col
      *
      * If the element does not exist then it is inserted via the insert(Index,Index) function
      * which itself turns the matrix into non-compressed mode if it was not already.
      *
      * This is an O(log(nnz_j)) operation (binary search) plus the cost of the insert(Index,Index)
      * function if the element does not already exist.
205 | */ |
206 | inline Scalar& coeffRef(Index row, Index col) |
207 | { |
208 | eigen_assert(row>=0 && row<rows() && col>=0 && col<cols()); |
209 | |
210 | const Index outer = IsRowMajor ? row : col; |
211 | const Index inner = IsRowMajor ? col : row; |
212 | |
213 | Index start = m_outerIndex[outer]; |
214 | Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1]; |
215 | eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix" ); |
216 | if(end<=start) |
217 | return insert(row,col); |
218 | const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner)); |
219 | if((p<end) && (m_data.index(p)==inner)) |
220 | return m_data.value(p); |
221 | else |
222 | return insert(row,col); |
223 | } |
224 | |
    /** \returns a reference to a newly inserted non-zero coefficient at coordinates \a row x \a col.
      * The non-zero coefficient must \b not already exist.
227 | * |
228 | * If the matrix \c *this is in compressed mode, then \c *this is turned into uncompressed |
229 | * mode while reserving room for 2 x this->innerSize() non zeros if reserve(Index) has not been called earlier. |
230 | * In this case, the insertion procedure is optimized for a \e sequential insertion mode where elements are assumed to be |
231 | * inserted by increasing outer-indices. |
232 | * |
233 | * If that's not the case, then it is strongly recommended to either use a triplet-list to assemble the matrix, or to first |
234 | * call reserve(const SizesType &) to reserve the appropriate number of non-zero elements per inner vector. |
235 | * |
236 | * Assuming memory has been appropriately reserved, this function performs a sorted insertion in O(1) |
237 | * if the elements of each inner vector are inserted in increasing inner index order, and in O(nnz_j) for a random insertion. |
238 | * |
239 | */ |
240 | Scalar& insert(Index row, Index col); |
241 | |
242 | public: |
243 | |
    /** Removes all non-zeros but keeps the allocated memory.
245 | * |
      * This function does not free the currently allocated memory. To release as much memory as possible,
247 | * call \code mat.data().squeeze(); \endcode after resizing it. |
248 | * |
249 | * \sa resize(Index,Index), data() |
250 | */ |
251 | inline void setZero() |
252 | { |
253 | m_data.clear(); |
254 | memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex)); |
255 | if(m_innerNonZeros) |
256 | memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex)); |
257 | } |
258 | |
259 | /** Preallocates \a reserveSize non zeros. |
260 | * |
261 | * Precondition: the matrix must be in compressed mode. */ |
262 | inline void reserve(Index reserveSize) |
263 | { |
264 | eigen_assert(isCompressed() && "This function does not make sense in non compressed mode." ); |
265 | m_data.reserve(reserveSize); |
266 | } |
267 | |
268 | #ifdef EIGEN_PARSED_BY_DOXYGEN |
269 | /** Preallocates \a reserveSize[\c j] non zeros for each column (resp. row) \c j. |
270 | * |
      * This function turns the matrix into non-compressed mode.
272 | * |
273 | * The type \c SizesType must expose the following interface: |
274 | \code |
275 | typedef value_type; |
276 | const value_type& operator[](i) const; |
277 | \endcode |
278 | * for \c i in the [0,this->outerSize()[ range. |
279 | * Typical choices include std::vector<int>, Eigen::VectorXi, Eigen::VectorXi::Constant, etc. |
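      *
      * A possible sketch using a std::vector (the per-vector estimate of 4 is an arbitrary placeholder):
      * \code
      * std::vector<int> sizes(mat.outerSize(), 4);
      * mat.reserve(sizes);
      * \endcode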
280 | */ |
281 | template<class SizesType> |
282 | inline void reserve(const SizesType& reserveSizes); |
283 | #else |
284 | template<class SizesType> |
285 | inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif = |
286 | #if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename |
287 | typename |
288 | #endif |
289 | SizesType::value_type()) |
290 | { |
291 | EIGEN_UNUSED_VARIABLE(enableif); |
292 | reserveInnerVectors(reserveSizes); |
293 | } |
294 | #endif // EIGEN_PARSED_BY_DOXYGEN |
295 | protected: |
296 | template<class SizesType> |
297 | inline void reserveInnerVectors(const SizesType& reserveSizes) |
298 | { |
299 | if(isCompressed()) |
300 | { |
301 | Index totalReserveSize = 0; |
302 | // turn the matrix into non-compressed mode |
303 | m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex))); |
304 | if (!m_innerNonZeros) internal::throw_std_bad_alloc(); |
305 | |
        // temporarily use m_innerNonZeros to hold the new starting points.
307 | StorageIndex* newOuterIndex = m_innerNonZeros; |
308 | |
309 | StorageIndex count = 0; |
310 | for(Index j=0; j<m_outerSize; ++j) |
311 | { |
312 | newOuterIndex[j] = count; |
313 | count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]); |
314 | totalReserveSize += reserveSizes[j]; |
315 | } |
316 | m_data.reserve(totalReserveSize); |
317 | StorageIndex previousOuterIndex = m_outerIndex[m_outerSize]; |
318 | for(Index j=m_outerSize-1; j>=0; --j) |
319 | { |
320 | StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j]; |
321 | for(Index i=innerNNZ-1; i>=0; --i) |
322 | { |
323 | m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i); |
324 | m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i); |
325 | } |
326 | previousOuterIndex = m_outerIndex[j]; |
327 | m_outerIndex[j] = newOuterIndex[j]; |
328 | m_innerNonZeros[j] = innerNNZ; |
329 | } |
330 | if(m_outerSize>0) |
331 | m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1]; |
332 | |
333 | m_data.resize(m_outerIndex[m_outerSize]); |
334 | } |
335 | else |
336 | { |
337 | StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex))); |
338 | if (!newOuterIndex) internal::throw_std_bad_alloc(); |
339 | |
340 | StorageIndex count = 0; |
341 | for(Index j=0; j<m_outerSize; ++j) |
342 | { |
343 | newOuterIndex[j] = count; |
344 | StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j]; |
345 | StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved); |
346 | count += toReserve + m_innerNonZeros[j]; |
347 | } |
348 | newOuterIndex[m_outerSize] = count; |
349 | |
350 | m_data.resize(count); |
351 | for(Index j=m_outerSize-1; j>=0; --j) |
352 | { |
353 | Index offset = newOuterIndex[j] - m_outerIndex[j]; |
354 | if(offset>0) |
355 | { |
356 | StorageIndex innerNNZ = m_innerNonZeros[j]; |
357 | for(Index i=innerNNZ-1; i>=0; --i) |
358 | { |
359 | m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i); |
360 | m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i); |
361 | } |
362 | } |
363 | } |
364 | |
365 | std::swap(m_outerIndex, newOuterIndex); |
366 | std::free(newOuterIndex); |
367 | } |
368 | |
369 | } |
370 | public: |
371 | |
372 | //--- low level purely coherent filling --- |
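
    // A sketch of the sequential filling protocol implemented by the functions below
    // (internal API; mat, i, j and v are placeholders):
    //   for(Index j=0; j<mat.outerSize(); ++j) {
    //     mat.startVec(j);                       // once per inner vector, in increasing order
    //     mat.insertBackByOuterInner(j,i) = v;   // inner indices i strictly increasing within j
    //   }
    //   mat.finalize();                          // close the insertion session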
373 | |
374 | /** \internal |
375 | * \returns a reference to the non zero coefficient at position \a row, \a col assuming that: |
376 | * - the nonzero does not already exist |
377 | * - the new coefficient is the last one according to the storage order |
378 | * |
      * Before filling a given inner vector you must call the startVec(Index) function.
380 | * |
381 | * After an insertion session, you should call the finalize() function. |
382 | * |
383 | * \sa insert, insertBackByOuterInner, startVec */ |
384 | inline Scalar& insertBack(Index row, Index col) |
385 | { |
386 | return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row); |
387 | } |
388 | |
389 | /** \internal |
390 | * \sa insertBack, startVec */ |
391 | inline Scalar& insertBackByOuterInner(Index outer, Index inner) |
392 | { |
393 | eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)" ); |
394 | eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)" ); |
395 | Index p = m_outerIndex[outer+1]; |
396 | ++m_outerIndex[outer+1]; |
397 | m_data.append(Scalar(0), inner); |
398 | return m_data.value(p); |
399 | } |
400 | |
401 | /** \internal |
402 | * \warning use it only if you know what you are doing */ |
403 | inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner) |
404 | { |
405 | Index p = m_outerIndex[outer+1]; |
406 | ++m_outerIndex[outer+1]; |
407 | m_data.append(Scalar(0), inner); |
408 | return m_data.value(p); |
409 | } |
410 | |
411 | /** \internal |
412 | * \sa insertBack, insertBackByOuterInner */ |
413 | inline void startVec(Index outer) |
414 | { |
415 | eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially" ); |
416 | eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially" ); |
417 | m_outerIndex[outer+1] = m_outerIndex[outer]; |
418 | } |
419 | |
420 | /** \internal |
421 | * Must be called after inserting a set of non zero entries using the low level compressed API. |
422 | */ |
423 | inline void finalize() |
424 | { |
425 | if(isCompressed()) |
426 | { |
427 | StorageIndex size = internal::convert_index<StorageIndex>(m_data.size()); |
428 | Index i = m_outerSize; |
429 | // find the last filled column |
430 | while (i>=0 && m_outerIndex[i]==0) |
431 | --i; |
432 | ++i; |
433 | while (i<=m_outerSize) |
434 | { |
435 | m_outerIndex[i] = size; |
436 | ++i; |
437 | } |
438 | } |
439 | } |
440 | |
441 | //--- |
442 | |
443 | template<typename InputIterators> |
444 | void setFromTriplets(const InputIterators& begin, const InputIterators& end); |
445 | |
446 | template<typename InputIterators,typename DupFunctor> |
447 | void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func); |
448 | |
449 | void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar,Scalar>()); } |
450 | |
451 | template<typename DupFunctor> |
452 | void collapseDuplicates(DupFunctor dup_func = DupFunctor()); |
453 | |
454 | //--- |
455 | |
456 | /** \internal |
457 | * same as insert(Index,Index) except that the indices are given relative to the storage order */ |
458 | Scalar& insertByOuterInner(Index j, Index i) |
459 | { |
460 | return insert(IsRowMajor ? j : i, IsRowMajor ? i : j); |
461 | } |
462 | |
463 | /** Turns the matrix into the \em compressed format. |
464 | */ |
465 | void makeCompressed() |
466 | { |
467 | if(isCompressed()) |
468 | return; |
469 | |
470 | eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0); |
471 | |
472 | Index oldStart = m_outerIndex[1]; |
473 | m_outerIndex[1] = m_innerNonZeros[0]; |
474 | for(Index j=1; j<m_outerSize; ++j) |
475 | { |
476 | Index nextOldStart = m_outerIndex[j+1]; |
477 | Index offset = oldStart - m_outerIndex[j]; |
478 | if(offset>0) |
479 | { |
480 | for(Index k=0; k<m_innerNonZeros[j]; ++k) |
481 | { |
482 | m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k); |
483 | m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k); |
484 | } |
485 | } |
486 | m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j]; |
487 | oldStart = nextOldStart; |
488 | } |
489 | std::free(m_innerNonZeros); |
490 | m_innerNonZeros = 0; |
491 | m_data.resize(m_outerIndex[m_outerSize]); |
492 | m_data.squeeze(); |
493 | } |
494 | |
    /** Turns the matrix into uncompressed mode */
496 | void uncompress() |
497 | { |
498 | if(m_innerNonZeros != 0) |
499 | return; |
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      if (!m_innerNonZeros) internal::throw_std_bad_alloc();
501 | for (Index i = 0; i < m_outerSize; i++) |
502 | { |
503 | m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i]; |
504 | } |
505 | } |
506 | |
    /** Suppresses all non-zeros which are \b much \b smaller \b than \a reference under the tolerance \a epsilon */
508 | void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision()) |
509 | { |
      prune(default_pruning_func(reference,epsilon));
511 | } |
512 | |
513 | /** Turns the matrix into compressed format, and suppresses all nonzeros which do not satisfy the predicate \a keep. |
514 | * The functor type \a KeepFunc must implement the following function: |
515 | * \code |
516 | * bool operator() (const Index& row, const Index& col, const Scalar& value) const; |
517 | * \endcode |
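      *
      * For instance, a hedged C++11 sketch keeping only the entries whose magnitude exceeds a
      * threshold (assuming a double-valued matrix):
      * \code
      * mat.prune([](const Index&, const Index&, const double& value)
      *           { return std::abs(value) > 1e-12; });
      * \endcode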
518 | * \sa prune(Scalar,RealScalar) |
519 | */ |
520 | template<typename KeepFunc> |
521 | void prune(const KeepFunc& keep = KeepFunc()) |
522 | { |
523 | // TODO optimize the uncompressed mode to avoid moving and allocating the data twice |
524 | makeCompressed(); |
525 | |
526 | StorageIndex k = 0; |
527 | for(Index j=0; j<m_outerSize; ++j) |
528 | { |
529 | Index previousStart = m_outerIndex[j]; |
530 | m_outerIndex[j] = k; |
531 | Index end = m_outerIndex[j+1]; |
532 | for(Index i=previousStart; i<end; ++i) |
533 | { |
534 | if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i))) |
535 | { |
536 | m_data.value(k) = m_data.value(i); |
537 | m_data.index(k) = m_data.index(i); |
538 | ++k; |
539 | } |
540 | } |
541 | } |
542 | m_outerIndex[m_outerSize] = k; |
543 | m_data.resize(k,0); |
544 | } |
545 | |
546 | /** Resizes the matrix to a \a rows x \a cols matrix leaving old values untouched. |
547 | * |
      * If the sizes of the matrix are decreased, then the matrix is turned into \b uncompressed mode
      * and the storage of the out-of-bounds coefficients is kept and reserved.
550 | * Call makeCompressed() to pack the entries and squeeze extra memory. |
551 | * |
552 | * \sa reserve(), setZero(), makeCompressed() |
553 | */ |
554 | void conservativeResize(Index rows, Index cols) |
555 | { |
556 | // No change |
557 | if (this->rows() == rows && this->cols() == cols) return; |
558 | |
      // If one dimension is zero, then there is nothing to be preserved
560 | if(rows==0 || cols==0) return resize(rows,cols); |
561 | |
562 | Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows(); |
563 | Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols(); |
564 | StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows); |
565 | |
566 | // Deals with inner non zeros |
567 | if (m_innerNonZeros) |
568 | { |
569 | // Resize m_innerNonZeros |
570 | StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex))); |
571 | if (!newInnerNonZeros) internal::throw_std_bad_alloc(); |
572 | m_innerNonZeros = newInnerNonZeros; |
573 | |
574 | for(Index i=m_outerSize; i<m_outerSize+outerChange; i++) |
575 | m_innerNonZeros[i] = 0; |
576 | } |
577 | else if (innerChange < 0) |
578 | { |
579 | // Inner size decreased: allocate a new m_innerNonZeros |
580 | m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize+outerChange+1) * sizeof(StorageIndex))); |
581 | if (!m_innerNonZeros) internal::throw_std_bad_alloc(); |
582 | for(Index i = 0; i < m_outerSize; i++) |
583 | m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i]; |
584 | } |
585 | |
586 | // Change the m_innerNonZeros in case of a decrease of inner size |
587 | if (m_innerNonZeros && innerChange < 0) |
588 | { |
589 | for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++) |
590 | { |
591 | StorageIndex &n = m_innerNonZeros[i]; |
592 | StorageIndex start = m_outerIndex[i]; |
593 | while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n; |
594 | } |
595 | } |
596 | |
597 | m_innerSize = newInnerSize; |
598 | |
599 | // Re-allocate outer index structure if necessary |
600 | if (outerChange == 0) |
601 | return; |
602 | |
603 | StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex))); |
604 | if (!newOuterIndex) internal::throw_std_bad_alloc(); |
605 | m_outerIndex = newOuterIndex; |
606 | if (outerChange > 0) |
607 | { |
608 | StorageIndex last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize]; |
609 | for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++) |
610 | m_outerIndex[i] = last; |
611 | } |
612 | m_outerSize += outerChange; |
613 | } |
614 | |
615 | /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero. |
616 | * |
      * This function does not free the currently allocated memory. To release as much memory as possible,
618 | * call \code mat.data().squeeze(); \endcode after resizing it. |
619 | * |
620 | * \sa reserve(), setZero() |
621 | */ |
622 | void resize(Index rows, Index cols) |
623 | { |
624 | const Index outerSize = IsRowMajor ? rows : cols; |
625 | m_innerSize = IsRowMajor ? cols : rows; |
626 | m_data.clear(); |
627 | if (m_outerSize != outerSize || m_outerSize==0) |
628 | { |
629 | std::free(m_outerIndex); |
630 | m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex))); |
631 | if (!m_outerIndex) internal::throw_std_bad_alloc(); |
632 | |
633 | m_outerSize = outerSize; |
634 | } |
635 | if(m_innerNonZeros) |
636 | { |
637 | std::free(m_innerNonZeros); |
638 | m_innerNonZeros = 0; |
639 | } |
640 | memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex)); |
641 | } |
642 | |
643 | /** \internal |
644 | * Resize the nonzero vector to \a size */ |
645 | void resizeNonZeros(Index size) |
646 | { |
647 | m_data.resize(size); |
648 | } |
649 | |
650 | /** \returns a const expression of the diagonal coefficients. */ |
651 | const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); } |
652 | |
653 | /** \returns a read-write expression of the diagonal coefficients. |
654 | * \warning If the diagonal entries are written, then all diagonal |
655 | * entries \b must already exist, otherwise an assertion will be raised. |
656 | */ |
657 | DiagonalReturnType diagonal() { return DiagonalReturnType(*this); } |
658 | |
659 | /** Default constructor yielding an empty \c 0 \c x \c 0 matrix */ |
660 | inline SparseMatrix() |
661 | : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) |
662 | { |
663 | check_template_parameters(); |
664 | resize(0, 0); |
665 | } |
666 | |
667 | /** Constructs a \a rows \c x \a cols empty matrix */ |
668 | inline SparseMatrix(Index rows, Index cols) |
669 | : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) |
670 | { |
671 | check_template_parameters(); |
672 | resize(rows, cols); |
673 | } |
674 | |
675 | /** Constructs a sparse matrix from the sparse expression \a other */ |
676 | template<typename OtherDerived> |
677 | inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other) |
678 | : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) |
679 | { |
680 | EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value), |
681 | YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) |
682 | check_template_parameters(); |
683 | const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit); |
684 | if (needToTranspose) |
685 | *this = other.derived(); |
686 | else |
687 | { |
688 | #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN |
689 | EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN |
690 | #endif |
691 | internal::call_assignment_no_alias(*this, other.derived()); |
692 | } |
693 | } |
694 | |
695 | /** Constructs a sparse matrix from the sparse selfadjoint view \a other */ |
696 | template<typename OtherDerived, unsigned int UpLo> |
697 | inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other) |
698 | : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) |
699 | { |
700 | check_template_parameters(); |
701 | Base::operator=(other); |
702 | } |
703 | |
704 | /** Copy constructor (it performs a deep copy) */ |
705 | inline SparseMatrix(const SparseMatrix& other) |
706 | : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) |
707 | { |
708 | check_template_parameters(); |
709 | *this = other.derived(); |
710 | } |
711 | |
712 | /** \brief Copy constructor with in-place evaluation */ |
713 | template<typename OtherDerived> |
714 | SparseMatrix(const ReturnByValue<OtherDerived>& other) |
715 | : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) |
716 | { |
717 | check_template_parameters(); |
718 | initAssignment(other); |
719 | other.evalTo(*this); |
720 | } |
721 | |
722 | /** \brief Copy constructor with in-place evaluation */ |
723 | template<typename OtherDerived> |
724 | explicit SparseMatrix(const DiagonalBase<OtherDerived>& other) |
725 | : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) |
726 | { |
727 | check_template_parameters(); |
728 | *this = other.derived(); |
729 | } |
730 | |
731 | /** Swaps the content of two sparse matrices of the same type. |
732 | * This is a fast operation that simply swaps the underlying pointers and parameters. */ |
733 | inline void swap(SparseMatrix& other) |
734 | { |
735 | //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n"); |
736 | std::swap(m_outerIndex, other.m_outerIndex); |
737 | std::swap(m_innerSize, other.m_innerSize); |
738 | std::swap(m_outerSize, other.m_outerSize); |
739 | std::swap(m_innerNonZeros, other.m_innerNonZeros); |
740 | m_data.swap(other.m_data); |
741 | } |
742 | |
743 | /** Sets *this to the identity matrix. |
      * This function also turns the matrix into compressed mode, and drops any reserved memory. */
745 | inline void setIdentity() |
746 | { |
      eigen_assert(rows() == cols() && "ONLY FOR SQUARE MATRICES");
748 | this->m_data.resize(rows()); |
749 | Eigen::Map<IndexVector>(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1)); |
750 | Eigen::Map<ScalarVector>(this->m_data.valuePtr(), rows()).setOnes(); |
751 | Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows())); |
752 | std::free(m_innerNonZeros); |
753 | m_innerNonZeros = 0; |
754 | } |
755 | inline SparseMatrix& operator=(const SparseMatrix& other) |
756 | { |
757 | if (other.isRValue()) |
758 | { |
759 | swap(other.const_cast_derived()); |
760 | } |
761 | else if(this!=&other) |
762 | { |
763 | #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN |
764 | EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN |
765 | #endif |
766 | initAssignment(other); |
767 | if(other.isCompressed()) |
768 | { |
769 | internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex); |
770 | m_data = other.m_data; |
771 | } |
772 | else |
773 | { |
774 | Base::operator=(other); |
775 | } |
776 | } |
777 | return *this; |
778 | } |
779 | |
780 | #ifndef EIGEN_PARSED_BY_DOXYGEN |
781 | template<typename OtherDerived> |
782 | inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other) |
783 | { return Base::operator=(other.derived()); } |
784 | #endif // EIGEN_PARSED_BY_DOXYGEN |
785 | |
786 | template<typename OtherDerived> |
787 | EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other); |
788 | |
789 | friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m) |
790 | { |
791 | EIGEN_DBG_SPARSE( |
792 | s << "Nonzero entries:\n" ; |
793 | if(m.isCompressed()) |
794 | { |
795 | for (Index i=0; i<m.nonZeros(); ++i) |
796 | s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") " ; |
797 | } |
798 | else |
799 | { |
800 | for (Index i=0; i<m.outerSize(); ++i) |
801 | { |
802 | Index p = m.m_outerIndex[i]; |
803 | Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i]; |
804 | Index k=p; |
805 | for (; k<pe; ++k) { |
806 | s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") " ; |
807 | } |
808 | for (; k<m.m_outerIndex[i+1]; ++k) { |
809 | s << "(_,_) " ; |
810 | } |
811 | } |
812 | } |
813 | s << std::endl; |
814 | s << std::endl; |
815 | s << "Outer pointers:\n" ; |
816 | for (Index i=0; i<m.outerSize(); ++i) { |
817 | s << m.m_outerIndex[i] << " " ; |
818 | } |
819 | s << " $" << std::endl; |
820 | if(!m.isCompressed()) |
821 | { |
822 | s << "Inner non zeros:\n" ; |
823 | for (Index i=0; i<m.outerSize(); ++i) { |
824 | s << m.m_innerNonZeros[i] << " " ; |
825 | } |
826 | s << " $" << std::endl; |
827 | } |
828 | s << std::endl; |
829 | ); |
830 | s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m); |
831 | return s; |
832 | } |
833 | |
834 | /** Destructor */ |
835 | inline ~SparseMatrix() |
836 | { |
837 | std::free(m_outerIndex); |
838 | std::free(m_innerNonZeros); |
839 | } |
840 | |
841 | /** Overloaded for performance */ |
842 | Scalar sum() const; |
843 | |
844 | # ifdef EIGEN_SPARSEMATRIX_PLUGIN |
845 | # include EIGEN_SPARSEMATRIX_PLUGIN |
846 | # endif |
847 | |
848 | protected: |
849 | |
850 | template<typename Other> |
851 | void initAssignment(const Other& other) |
852 | { |
853 | resize(other.rows(), other.cols()); |
854 | if(m_innerNonZeros) |
855 | { |
856 | std::free(m_innerNonZeros); |
857 | m_innerNonZeros = 0; |
858 | } |
859 | } |
860 | |
861 | /** \internal |
862 | * \sa insert(Index,Index) */ |
863 | EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col); |
864 | |
865 | /** \internal |
      * A vector object that is equal to 0 everywhere except at position \a i where it equals \a v */
867 | class SingletonVector |
868 | { |
869 | StorageIndex m_index; |
870 | StorageIndex m_value; |
871 | public: |
872 | typedef StorageIndex value_type; |
873 | SingletonVector(Index i, Index v) |
874 | : m_index(convert_index(i)), m_value(convert_index(v)) |
875 | {} |
876 | |
877 | StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; } |
878 | }; |
879 | |
880 | /** \internal |
881 | * \sa insert(Index,Index) */ |
882 | EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col); |
883 | |
884 | public: |
885 | /** \internal |
886 | * \sa insert(Index,Index) */ |
887 | EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col) |
888 | { |
889 | const Index outer = IsRowMajor ? row : col; |
890 | const Index inner = IsRowMajor ? col : row; |
891 | |
892 | eigen_assert(!isCompressed()); |
893 | eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer])); |
894 | |
895 | Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++; |
896 | m_data.index(p) = convert_index(inner); |
897 | return (m_data.value(p) = Scalar(0)); |
898 | } |
899 | |
900 | private: |
901 | static void check_template_parameters() |
902 | { |
903 | EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE); |
904 | EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS); |
905 | } |
906 | |
    struct default_pruning_func {
      default_pruning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
909 | inline bool operator() (const Index&, const Index&, const Scalar& value) const |
910 | { |
911 | return !internal::isMuchSmallerThan(value, reference, epsilon); |
912 | } |
913 | Scalar reference; |
914 | RealScalar epsilon; |
915 | }; |
916 | }; |
917 | |
918 | namespace internal { |
919 | |
920 | template<typename InputIterator, typename SparseMatrixType, typename DupFunctor> |
921 | void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func) |
922 | { |
923 | enum { IsRowMajor = SparseMatrixType::IsRowMajor }; |
924 | typedef typename SparseMatrixType::Scalar Scalar; |
925 | typedef typename SparseMatrixType::StorageIndex StorageIndex; |
926 | SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols()); |
927 | |
928 | if(begin!=end) |
929 | { |
930 | // pass 1: count the nnz per inner-vector |
931 | typename SparseMatrixType::IndexVector wi(trMat.outerSize()); |
932 | wi.setZero(); |
933 | for(InputIterator it(begin); it!=end; ++it) |
934 | { |
935 | eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols()); |
936 | wi(IsRowMajor ? it->col() : it->row())++; |
937 | } |
938 | |
939 | // pass 2: insert all the elements into trMat |
940 | trMat.reserve(wi); |
941 | for(InputIterator it(begin); it!=end; ++it) |
942 | trMat.insertBackUncompressed(it->row(),it->col()) = it->value(); |
943 | |
944 | // pass 3: |
945 | trMat.collapseDuplicates(dup_func); |
946 | } |
947 | |
948 | // pass 4: transposed copy -> implicit sorting |
949 | mat = trMat; |
950 | } |
951 | |
952 | } |
953 | |
954 | |
955 | /** Fill the matrix \c *this with the list of \em triplets defined by the iterator range \a begin - \a end. |
956 | * |
957 | * A \em triplet is a tuple (i,j,value) defining a non-zero element. |
  * The input list of triplets does not have to be sorted, and can contain duplicated elements.
  * In any case, the result is a \b sorted and \b compressed sparse matrix where the duplicates have been summed up.
  * This is an \em O(n) operation, with \em n the number of triplet elements.
  * The initial contents of \c *this are destroyed.
962 | * The matrix \c *this must be properly resized beforehand using the SparseMatrix(Index,Index) constructor, |
963 | * or the resize(Index,Index) method. The sizes are not extracted from the triplet list. |
964 | * |
965 | * The \a InputIterators value_type must provide the following interface: |
966 | * \code |
  * Scalar value() const;       // the value
  * StorageIndex row() const;   // the row index i
  * StorageIndex col() const;   // the column index j
970 | * \endcode |
971 | * See for instance the Eigen::Triplet template class. |
972 | * |
973 | * Here is a typical usage example: |
974 | * \code |
975 | typedef Triplet<double> T; |
976 | std::vector<T> tripletList; |
tripletList.reserve(estimation_of_entries);
978 | for(...) |
979 | { |
980 | // ... |
981 | tripletList.push_back(T(i,j,v_ij)); |
982 | } |
983 | SparseMatrixType m(rows,cols); |
984 | m.setFromTriplets(tripletList.begin(), tripletList.end()); |
985 | // m is ready to go! |
986 | * \endcode |
987 | * |
988 | * \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define |
989 | * an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather |
  * be explicitly stored into a std::vector for instance.
991 | */ |
992 | template<typename Scalar, int _Options, typename _StorageIndex> |
993 | template<typename InputIterators> |
994 | void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end) |
995 | { |
996 | internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>()); |
997 | } |
998 | |
999 | /** The same as setFromTriplets but when duplicates are met the functor \a dup_func is applied: |
1000 | * \code |
1001 | * value = dup_func(OldValue, NewValue) |
1002 | * \endcode |
1003 | * Here is a C++11 example keeping the latest entry only: |
1004 | * \code |
1005 | * mat.setFromTriplets(triplets.begin(), triplets.end(), [] (const Scalar&,const Scalar &b) { return b; }); |
1006 | * \endcode |
1007 | */ |
1008 | template<typename Scalar, int _Options, typename _StorageIndex> |
1009 | template<typename InputIterators,typename DupFunctor> |
1010 | void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func) |
1011 | { |
1012 | internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex>, DupFunctor>(begin, end, *this, dup_func); |
1013 | } |
1014 | |
1015 | /** \internal */ |
1016 | template<typename Scalar, int _Options, typename _StorageIndex> |
1017 | template<typename DupFunctor> |
1018 | void SparseMatrix<Scalar,_Options,_StorageIndex>::collapseDuplicates(DupFunctor dup_func) |
1019 | { |
1020 | eigen_assert(!isCompressed()); |
1021 | // TODO, in practice we should be able to use m_innerNonZeros for that task |
1022 | IndexVector wi(innerSize()); |
1023 | wi.fill(-1); |
1024 | StorageIndex count = 0; |
  // for each inner-vector, wi[inner_index] will hold the position of the first element into the index/value buffers
1026 | for(Index j=0; j<outerSize(); ++j) |
1027 | { |
1028 | StorageIndex start = count; |
1029 | Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j]; |
1030 | for(Index k=m_outerIndex[j]; k<oldEnd; ++k) |
1031 | { |
1032 | Index i = m_data.index(k); |
1033 | if(wi(i)>=start) |
1034 | { |
        // we have already met this entry => accumulate it
1036 | m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k)); |
1037 | } |
1038 | else |
1039 | { |
1040 | m_data.value(count) = m_data.value(k); |
1041 | m_data.index(count) = m_data.index(k); |
1042 | wi(i) = count; |
1043 | ++count; |
1044 | } |
1045 | } |
1046 | m_outerIndex[j] = start; |
1047 | } |
1048 | m_outerIndex[m_outerSize] = count; |
1049 | |
1050 | // turn the matrix into compressed form |
1051 | std::free(m_innerNonZeros); |
1052 | m_innerNonZeros = 0; |
1053 | m_data.resize(m_outerIndex[m_outerSize]); |
1054 | } |
1055 | |
1056 | template<typename Scalar, int _Options, typename _StorageIndex> |
1057 | template<typename OtherDerived> |
1058 | EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scalar,_Options,_StorageIndex>::operator=(const SparseMatrixBase<OtherDerived>& other) |
1059 | { |
1060 | EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value), |
1061 | YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) |
1062 | |
1063 | #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN |
1064 | EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN |
1065 | #endif |
1066 | |
1067 | const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit); |
1068 | if (needToTranspose) |
1069 | { |
1070 | #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN |
1071 | EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN |
1072 | #endif |
    // two-pass algorithm:
    // 1 - compute the number of coeffs per dest inner vector
    // 2 - do the actual copy/eval
    // Since each coeff of the rhs has to be evaluated twice, let's evaluate it into a temporary if needed
1077 | typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy; |
1078 | typedef typename internal::remove_all<OtherCopy>::type _OtherCopy; |
1079 | typedef internal::evaluator<_OtherCopy> OtherCopyEval; |
1080 | OtherCopy otherCopy(other.derived()); |
1081 | OtherCopyEval otherCopyEval(otherCopy); |
1082 | |
1083 | SparseMatrix dest(other.rows(),other.cols()); |
1084 | Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero(); |
1085 | |
1086 | // pass 1 |
1087 | // FIXME the above copy could be merged with that pass |
1088 | for (Index j=0; j<otherCopy.outerSize(); ++j) |
1089 | for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it) |
1090 | ++dest.m_outerIndex[it.index()]; |
1091 | |
1092 | // prefix sum |
1093 | StorageIndex count = 0; |
1094 | IndexVector positions(dest.outerSize()); |
1095 | for (Index j=0; j<dest.outerSize(); ++j) |
1096 | { |
1097 | StorageIndex tmp = dest.m_outerIndex[j]; |
1098 | dest.m_outerIndex[j] = count; |
1099 | positions[j] = count; |
1100 | count += tmp; |
1101 | } |
1102 | dest.m_outerIndex[dest.outerSize()] = count; |
1103 | // alloc |
1104 | dest.m_data.resize(count); |
1105 | // pass 2 |
1106 | for (StorageIndex j=0; j<otherCopy.outerSize(); ++j) |
1107 | { |
1108 | for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it) |
1109 | { |
1110 | Index pos = positions[it.index()]++; |
1111 | dest.m_data.index(pos) = j; |
1112 | dest.m_data.value(pos) = it.value(); |
1113 | } |
1114 | } |
1115 | this->swap(dest); |
1116 | return *this; |
1117 | } |
1118 | else |
1119 | { |
1120 | if(other.isRValue()) |
1121 | { |
1122 | initAssignment(other.derived()); |
1123 | } |
1124 | // there is no special optimization |
1125 | return Base::operator=(other.derived()); |
1126 | } |
1127 | } |
1128 | |
1129 | template<typename _Scalar, int _Options, typename _StorageIndex> |
1130 | typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insert(Index row, Index col) |
1131 | { |
1132 | eigen_assert(row>=0 && row<rows() && col>=0 && col<cols()); |
1133 | |
1134 | const Index outer = IsRowMajor ? row : col; |
1135 | const Index inner = IsRowMajor ? col : row; |
1136 | |
1137 | if(isCompressed()) |
1138 | { |
1139 | if(nonZeros()==0) |
1140 | { |
1141 | // reserve space if not already done |
1142 | if(m_data.allocatedSize()==0) |
1143 | m_data.reserve(2*m_innerSize); |
1144 | |
1145 | // turn the matrix into non-compressed mode |
1146 | m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex))); |
1147 | if(!m_innerNonZeros) internal::throw_std_bad_alloc(); |
1148 | |
1149 | memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex)); |
1150 | |
1151 | // pack all inner-vectors to the end of the pre-allocated space |
1152 | // and allocate the entire free-space to the first inner-vector |
1153 | StorageIndex end = convert_index(m_data.allocatedSize()); |
1154 | for(Index j=1; j<=m_outerSize; ++j) |
1155 | m_outerIndex[j] = end; |
1156 | } |
1157 | else |
1158 | { |
1159 | // turn the matrix into non-compressed mode |
1160 | m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex))); |
1161 | if(!m_innerNonZeros) internal::throw_std_bad_alloc(); |
1162 | for(Index j=0; j<m_outerSize; ++j) |
1163 | m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j]; |
1164 | } |
1165 | } |
1166 | |
1167 | // check whether we can do a fast "push back" insertion |
1168 | Index data_end = m_data.allocatedSize(); |
1169 | |
1170 | // First case: we are filling a new inner vector which is packed at the end. |
1171 | // We assume that all remaining inner-vectors are also empty and packed to the end. |
1172 | if(m_outerIndex[outer]==data_end) |
1173 | { |
1174 | eigen_internal_assert(m_innerNonZeros[outer]==0); |
1175 | |
1176 | // pack previous empty inner-vectors to end of the used-space |
1177 | // and allocate the entire free-space to the current inner-vector. |
1178 | StorageIndex p = convert_index(m_data.size()); |
1179 | Index j = outer; |
1180 | while(j>=0 && m_innerNonZeros[j]==0) |
1181 | m_outerIndex[j--] = p; |
1182 | |
1183 | // push back the new element |
1184 | ++m_innerNonZeros[outer]; |
1185 | m_data.append(Scalar(0), inner); |
1186 | |
1187 | // check for reallocation |
1188 | if(data_end != m_data.allocatedSize()) |
1189 | { |
1190 | // m_data has been reallocated |
1191 | // -> move remaining inner-vectors back to the end of the free-space |
1192 | // so that the entire free-space is allocated to the current inner-vector. |
1193 | eigen_internal_assert(data_end < m_data.allocatedSize()); |
1194 | StorageIndex new_end = convert_index(m_data.allocatedSize()); |
1195 | for(Index k=outer+1; k<=m_outerSize; ++k) |
1196 | if(m_outerIndex[k]==data_end) |
1197 | m_outerIndex[k] = new_end; |
1198 | } |
1199 | return m_data.value(p); |
1200 | } |
1201 | |
1202 | // Second case: the next inner-vector is packed to the end |
  // and the end of the current inner-vector matches the used-space.
1204 | if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size()) |
1205 | { |
1206 | eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0); |
1207 | |
1208 | // add space for the new element |
1209 | ++m_innerNonZeros[outer]; |
1210 | m_data.resize(m_data.size()+1); |
1211 | |
1212 | // check for reallocation |
1213 | if(data_end != m_data.allocatedSize()) |
1214 | { |
1215 | // m_data has been reallocated |
1216 | // -> move remaining inner-vectors back to the end of the free-space |
1217 | // so that the entire free-space is allocated to the current inner-vector. |
1218 | eigen_internal_assert(data_end < m_data.allocatedSize()); |
1219 | StorageIndex new_end = convert_index(m_data.allocatedSize()); |
1220 | for(Index k=outer+1; k<=m_outerSize; ++k) |
1221 | if(m_outerIndex[k]==data_end) |
1222 | m_outerIndex[k] = new_end; |
1223 | } |
1224 | |
1225 | // and insert it at the right position (sorted insertion) |
1226 | Index startId = m_outerIndex[outer]; |
1227 | Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1; |
1228 | while ( (p > startId) && (m_data.index(p-1) > inner) ) |
1229 | { |
1230 | m_data.index(p) = m_data.index(p-1); |
1231 | m_data.value(p) = m_data.value(p-1); |
1232 | --p; |
1233 | } |
1234 | |
1235 | m_data.index(p) = convert_index(inner); |
    return (m_data.value(p) = Scalar(0));
1237 | } |
1238 | |
1239 | if(m_data.size() != m_data.allocatedSize()) |
1240 | { |
    // make sure the matrix is compatible with random un-compressed insertion:
1242 | m_data.resize(m_data.allocatedSize()); |
1243 | this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2)); |
1244 | } |
1245 | |
1246 | return insertUncompressed(row,col); |
1247 | } |
1248 | |
1249 | template<typename _Scalar, int _Options, typename _StorageIndex> |
1250 | EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertUncompressed(Index row, Index col) |
1251 | { |
1252 | eigen_assert(!isCompressed()); |
1253 | |
1254 | const Index outer = IsRowMajor ? row : col; |
1255 | const StorageIndex inner = convert_index(IsRowMajor ? col : row); |
1256 | |
1257 | Index room = m_outerIndex[outer+1] - m_outerIndex[outer]; |
1258 | StorageIndex innerNNZ = m_innerNonZeros[outer]; |
1259 | if(innerNNZ>=room) |
1260 | { |
1261 | // this inner vector is full, we need to reallocate the whole buffer :( |
1262 | reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ))); |
1263 | } |
1264 | |
1265 | Index startId = m_outerIndex[outer]; |
1266 | Index p = startId + m_innerNonZeros[outer]; |
1267 | while ( (p > startId) && (m_data.index(p-1) > inner) ) |
1268 | { |
1269 | m_data.index(p) = m_data.index(p-1); |
1270 | m_data.value(p) = m_data.value(p-1); |
1271 | --p; |
1272 | } |
1273 | eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end" ); |
1274 | |
1275 | m_innerNonZeros[outer]++; |
1276 | |
1277 | m_data.index(p) = inner; |
1278 | return (m_data.value(p) = Scalar(0)); |
1279 | } |
1280 | |
1281 | template<typename _Scalar, int _Options, typename _StorageIndex> |
1282 | EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertCompressed(Index row, Index col) |
1283 | { |
1284 | eigen_assert(isCompressed()); |
1285 | |
1286 | const Index outer = IsRowMajor ? row : col; |
1287 | const Index inner = IsRowMajor ? col : row; |
1288 | |
1289 | Index previousOuter = outer; |
1290 | if (m_outerIndex[outer+1]==0) |
1291 | { |
1292 | // we start a new inner vector |
1293 | while (previousOuter>=0 && m_outerIndex[previousOuter]==0) |
1294 | { |
1295 | m_outerIndex[previousOuter] = convert_index(m_data.size()); |
1296 | --previousOuter; |
1297 | } |
1298 | m_outerIndex[outer+1] = m_outerIndex[outer]; |
1299 | } |
1300 | |
1301 | // here we have to handle the tricky case where the outerIndex array |
  // starts with: [ 0 0 0 0 0 1 ...] and we are inserting in, e.g.,
1303 | // the 2nd inner vector... |
1304 | bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0)) |
1305 | && (std::size_t(m_outerIndex[outer+1]) == m_data.size()); |
1306 | |
1307 | std::size_t startId = m_outerIndex[outer]; |
1308 | // FIXME let's make sure sizeof(long int) == sizeof(std::size_t) |
1309 | std::size_t p = m_outerIndex[outer+1]; |
1310 | ++m_outerIndex[outer+1]; |
1311 | |
1312 | double reallocRatio = 1; |
1313 | if (m_data.allocatedSize()<=m_data.size()) |
1314 | { |
1315 | // if there is no preallocated memory, let's reserve a minimum of 32 elements |
1316 | if (m_data.size()==0) |
1317 | { |
1318 | m_data.reserve(32); |
1319 | } |
1320 | else |
1321 | { |
1322 | // we need to reallocate the data, to reduce multiple reallocations |
1323 | // we use a smart resize algorithm based on the current filling ratio |
      // in addition, we use double to avoid integer overflow
1325 | double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1); |
1326 | reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size()); |
      // furthermore we bound the realloc ratio to:
      // 1) reduce multiple minor reallocs when the matrix is almost filled
      // 2) avoid allocating too much memory when the matrix is almost empty
1330 | reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.); |
1331 | } |
1332 | } |
1333 | m_data.resize(m_data.size()+1,reallocRatio); |
1334 | |
1335 | if (!isLastVec) |
1336 | { |
1337 | if (previousOuter==-1) |
1338 | { |
1339 | // oops wrong guess. |
1340 | // let's correct the outer offsets |
1341 | for (Index k=0; k<=(outer+1); ++k) |
1342 | m_outerIndex[k] = 0; |
1343 | Index k=outer+1; |
1344 | while(m_outerIndex[k]==0) |
1345 | m_outerIndex[k++] = 1; |
1346 | while (k<=m_outerSize && m_outerIndex[k]!=0) |
1347 | m_outerIndex[k++]++; |
1348 | p = 0; |
1349 | --k; |
1350 | k = m_outerIndex[k]-1; |
1351 | while (k>0) |
1352 | { |
1353 | m_data.index(k) = m_data.index(k-1); |
1354 | m_data.value(k) = m_data.value(k-1); |
1355 | k--; |
1356 | } |
1357 | } |
1358 | else |
1359 | { |
1360 | // we are not inserting into the last inner vec |
1361 | // update outer indices: |
1362 | Index j = outer+2; |
1363 | while (j<=m_outerSize && m_outerIndex[j]!=0) |
1364 | m_outerIndex[j++]++; |
1365 | --j; |
1366 | // shift data of last vecs: |
1367 | Index k = m_outerIndex[j]-1; |
1368 | while (k>=Index(p)) |
1369 | { |
1370 | m_data.index(k) = m_data.index(k-1); |
1371 | m_data.value(k) = m_data.value(k-1); |
1372 | k--; |
1373 | } |
1374 | } |
1375 | } |
1376 | |
1377 | while ( (p > startId) && (m_data.index(p-1) > inner) ) |
1378 | { |
1379 | m_data.index(p) = m_data.index(p-1); |
1380 | m_data.value(p) = m_data.value(p-1); |
1381 | --p; |
1382 | } |
1383 | |
1384 | m_data.index(p) = inner; |
1385 | return (m_data.value(p) = Scalar(0)); |
1386 | } |
1387 | |
1388 | namespace internal { |
1389 | |
1390 | template<typename _Scalar, int _Options, typename _StorageIndex> |
1391 | struct evaluator<SparseMatrix<_Scalar,_Options,_StorageIndex> > |
1392 | : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > > |
1393 | { |
1394 | typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > > Base; |
1395 | typedef SparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType; |
1396 | evaluator() : Base() {} |
1397 | explicit evaluator(const SparseMatrixType &mat) : Base(mat) {} |
1398 | }; |
1399 | |
1400 | } |
1401 | |
1402 | } // end namespace Eigen |
1403 | |
1404 | #endif // EIGEN_SPARSEMATRIX_H |
1405 | |