Line data Source code
1 : /*
2 : * File: Matrix.cpp
3 : * Author: Pantelis Sopasakis
4 : *
5 : * Created on June 30, 2015, 12:34 PM
6 : *
7 : * ForBES is free software: you can redistribute it and/or modify
8 : * it under the terms of the GNU Lesser General Public License as published by
9 : * the Free Software Foundation, either version 3 of the License, or
10 : * (at your option) any later version.
11 : *
12 : * ForBES is distributed in the hope that it will be useful,
13 : * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 : * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 : * GNU Lesser General Public License for more details.
16 : *
17 : * You should have received a copy of the GNU Lesser General Public License
18 : * along with ForBES. If not, see <http://www.gnu.org/licenses/>.
19 : */
20 :
21 : #include "Matrix.h"
22 : #include <iostream>
23 : #include <stdexcept>
24 : #include <complex>
25 : #include <cstdlib>
26 : #include <stdlib.h>
27 : #include <iomanip>
28 : #include <cmath>
29 : #include <cstring>
30 : #include <assert.h>
31 : #include <limits>
32 :
33 : #ifdef USE_LIBS
34 : #include <cblas.h>
35 : #include <lapacke.h>
36 : #endif
37 :
38 : /* STATIC MEMBERS */
39 :
40 : cholmod_common* Matrix::ms_singleton = NULL;
41 :
42 88825 : cholmod_common* Matrix::cholmod_handle() {
43 88825 : if (ms_singleton == NULL) {
44 53 : ms_singleton = new cholmod_common;
45 53 : cholmod_start(ms_singleton);
46 : }
47 88825 : return ms_singleton;
48 : }
49 :
50 140 : int Matrix::destroy_handle() {
51 140 : if (ms_singleton == NULL) {
52 92 : return 0;
53 : }
54 48 : int status = cholmod_finish(ms_singleton);
55 48 : ms_singleton = NULL;
56 48 : return status;
57 : }
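/*
 * Reviewer note (illustrative sketch, not part of the original source):
 * cholmod_handle() lazily allocates and starts a single shared cholmod_common
 * on first use, and destroy_handle() finishes and resets it. Typical lifecycle:
 *
 *   cholmod_common* cm = Matrix::cholmod_handle(); // created on demand
 *   // ... all CHOLMOD calls in this file reuse the same handle ...
 *   Matrix::destroy_handle();                      // usually once, at shutdown
 */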
58 :
59 : /********* CONSTRUCTORS ************/
60 129332 : Matrix::Matrix() {
61 129332 : m_nrows = 0;
62 129332 : m_ncols = 0;
63 129332 : m_data = new double[1];
64 129332 : *m_data = 0;
65 129332 : m_type = MATRIX_DENSE;
66 129332 : m_dataLength = 0;
67 129332 : m_transpose = false;
68 129332 : m_triplet = NULL;
69 129332 : m_sparse = NULL;
70 129332 : m_dense = NULL;
71 129332 : m_sparseStorageType = CHOLMOD_TYPE_TRIPLET;
72 129332 : m_delete_data = true;
73 129332 : }
74 :
75 12 : Matrix::Matrix(std::pair<size_t, size_t> dimensions) {
76 12 : init(dimensions.first, dimensions.second, MATRIX_DENSE);
77 12 : }
78 :
79 285083 : Matrix::Matrix(size_t nr, size_t nc) {
80 285083 : init(nr, nc, MATRIX_DENSE);
81 285083 : }
82 :
83 342246 : Matrix::Matrix(size_t nr, size_t nc, MatrixType mType) {
84 342246 : init(nr, nc, mType);
85 342243 : }
86 :
87 1024 : Matrix::Matrix(size_t nr, size_t nc, const double * dat) {
88 1024 : init(nr, nc, MATRIX_DENSE);
89 6417 : for (size_t j = 0; j < nc * nr; j++) {
90 5393 : m_data[j] = dat[j];
91 : }
92 1024 : }
93 :
94 12 : Matrix::Matrix(size_t nr, size_t nc, const double * dat, MatrixType mType) {
95 12 : init(nr, nc, mType);
96 159 : for (size_t j = 0; j < length(); j++) {
97 147 : m_data[j] = dat[j];
98 : }
99 12 : }
100 :
101 545429 : Matrix::Matrix(const Matrix& orig) {
102 545429 : m_ncols = orig.m_ncols;
103 545429 : m_nrows = orig.m_nrows;
104 545429 : m_transpose = orig.m_transpose;
105 545429 : m_data = NULL;
106 545429 : m_delete_data = orig.m_delete_data;
107 545429 : m_triplet = NULL;
108 545429 : m_sparse = NULL;
109 545429 : m_dense = NULL;
110 545429 : m_type = orig.m_type;
111 545429 : if (orig.m_type != MATRIX_SPARSE) {
112 536334 : size_t n = orig.m_dataLength;
113 536334 : if (n == 0) {
114 1 : n = 1;
115 : }
116 536334 : m_data = new double[n];
117 536334 : memcpy(m_data, orig.m_data, n * sizeof (double));
118 536334 : m_dataLength = orig.m_dataLength;
119 536334 : m_delete_data = true;
120 : } else {
121 9095 : if (orig.m_triplet != NULL) {
122 9095 : m_triplet = cholmod_copy_triplet(orig.m_triplet, Matrix::cholmod_handle());
123 : }
124 9095 : if (orig.m_sparse != NULL) {
125 3012 : m_sparse = cholmod_copy_sparse(orig.m_sparse, Matrix::cholmod_handle());
126 : }
127 9095 : if (orig.m_dense != NULL) {
128 0 : m_dense = cholmod_copy_dense(orig.m_dense, Matrix::cholmod_handle());
129 : }
130 : }
131 545429 : m_sparseStorageType = orig.m_sparseStorageType;
132 545429 : }
133 :
134 : /********* DESTRUCTOR ************/
135 1339767 : Matrix::~Matrix() {
136 1304283 : m_ncols = 0;
137 1304283 : m_nrows = 0;
138 1304283 : if (m_data != NULL && m_delete_data) {
139 1283202 : delete[] m_data;
140 : }
141 1304283 : m_data = NULL; /* so that we don't double-free */
142 1304283 : m_delete_data = false; /* for extra safety (just in case) */
143 1304283 : if (m_triplet != NULL) {
144 19398 : cholmod_free_triplet(&m_triplet, Matrix::cholmod_handle());
145 19398 : m_triplet = NULL;
146 : }
147 1304283 : if (m_sparse != NULL) {
148 13976 : cholmod_free_sparse(&m_sparse, Matrix::cholmod_handle());
149 13976 : m_sparse = NULL;
150 : }
151 1304283 : if (m_dense != NULL) {
152 125 : m_dense->x = NULL;
153 125 : cholmod_free_dense(&m_dense, Matrix::cholmod_handle());
154 125 : m_dense = NULL;
155 : }
156 1339767 : }
157 :
158 : /********* GETTERS/SETTERS ************/
159 7358098 : size_t Matrix::getNcols() const {
160 7358098 : return m_ncols;
161 : }
162 :
163 8103910 : size_t Matrix::getNrows() const {
164 8103910 : return m_nrows;
165 : }
166 :
167 522 : double * Matrix::getData() {
168 522 : return m_data;
169 : }
170 :
171 5517724 : bool Matrix::isEmpty() const {
172 5517724 : return (m_nrows == 0 || m_ncols == 0);
173 : }
174 :
175 498118 : bool Matrix::isColumnVector() const {
176 498118 : return this -> m_ncols == 1;
177 : }
178 :
179 2 : bool Matrix::isRowVector() const {
180 2 : return this -> m_nrows == 1;
181 : }
182 :
183 972067 : size_t Matrix::length() const {
184 972067 : return m_dataLength;
185 : }
186 :
187 : /********* OTHER METHODS ************/
188 :
189 117558 : void Matrix::transpose() {
190 117558 : if (m_type == MATRIX_DIAGONAL || m_type == MATRIX_SYMMETRIC) {
191 117562 : return;
192 : }
193 :
194 117554 : if (m_type == MATRIX_SPARSE) {
195 1571 : _createSparse();
196 1571 : m_sparse = cholmod_transpose(m_sparse, 1, cholmod_handle());
197 : }
198 117554 : if (this -> m_transpose) {
199 57758 : this -> m_transpose = false;
200 : } else {
201 59796 : this -> m_transpose = true;
202 : }
203 117554 : std::swap(this -> m_ncols, this -> m_nrows);
204 : }
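/*
 * Reviewer note: for dense and lower-triangular matrices, transpose() is lazy;
 * it only toggles m_transpose and swaps the dimensions, and get()/set()
 * compensate for the flag. Diagonal and symmetric matrices return immediately
 * (transposition is a no-op for them). Sparse matrices are the exception:
 * m_sparse is physically transposed via cholmod_transpose, while m_triplet is
 * left untouched (see the long comment in generic_add_helper_left_sparse).
 */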
205 :
206 6 : int Matrix::reshape(size_t nrows, size_t ncols) {
207 6 : size_t new_size = nrows * ncols;
208 6 : if (new_size == 0) {
209 1 : return -1;
210 : }
211 5 : if (new_size > length()) {
212 1 : return -2;
213 : }
214 4 : this -> m_nrows = nrows;
215 4 : this -> m_ncols = ncols;
216 4 : return 0;
217 : }
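/*
 * Reviewer note: reshape() keeps the underlying buffer and only changes the
 * logical dimensions. It returns 0 on success, -1 if either requested
 * dimension is zero, and -2 if nrows * ncols exceeds the allocated length().
 */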
218 :
219 4036 : double Matrix::get(const size_t i) const {
220 4036 : return m_data[i];
221 : }
222 :
223 5517651 : double Matrix::get(const size_t i, const size_t j) const {
224 : //LCOV_EXCL_START
225 : if (isEmpty()) {
226 : throw std::out_of_range("Method get(size_t, size_t) applied to an empty matrix");
227 : }
228 : if (i >= getNrows() || j >= getNcols()) {
229 : throw std::out_of_range("Index out of range!");
230 : }
231 : //LCOV_EXCL_STOP
232 5517649 : if (m_type == MATRIX_DENSE) {
233 2346914 : return !m_transpose ? m_data[i + j * m_nrows] : m_data[j + i * m_ncols];
234 3170735 : } else if (m_type == MATRIX_DIAGONAL) {
235 144070 : if (i == j) {
236 39948 : return m_data[i];
237 : } else {
238 104122 : return 0.0;
239 : }
240 3026665 : } else if (m_type == MATRIX_SYMMETRIC) {
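/*
 * Packed lower-triangular, column-major storage: entry (i,j) with i >= j is
 * found at m_data[i + m_nrows * j - j * (j + 1) / 2]. Worked 3x3 example
 * (sketch): (0,0)->0, (1,0)->1, (2,0)->2, (1,1)->3, (2,1)->4, (2,2)->5.
 * MATRIX_LOWERTR below uses the same indexing.
 */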
241 231587 : size_t i_ = std::max(i, j);
242 231587 : size_t j_ = std::min(i, j);
243 231587 : return m_data[i_ + m_nrows * j_ - j_ * (j_ + 1) / 2];
244 2795078 : } else if (m_type == MATRIX_LOWERTR) {
245 : return m_transpose
246 420 : ? (j >= i) ? m_data[j + m_ncols * i - i * (i + 1) / 2] : 0.0
247 116182 : : (i >= j) ? m_data[i + m_nrows * j - j * (j + 1) / 2] : 0.0;
248 : } else {
249 : /* if (m_type == MATRIX_SPARSE) */
250 : //LCOV_EXCL_START
251 : if (m_triplet == NULL) {
252 : throw std::logic_error("not supported yet");
253 : }
254 : //LCOV_EXCL_STOP
255 2679316 : double val = 0.0;
256 2679316 : int i_ = m_transpose ? j : i;
257 2679316 : int j_ = m_transpose ? i : j;
258 215269400 : for (size_t k = 0; k < m_triplet->nnz; k++) {
259 213854087 : if (i_ == (static_cast<int*> (m_triplet->i))[k]) {
260 7394780 : if (j_ == (static_cast<int*> (m_triplet->j))[k]) {
261 1264003 : val = (static_cast<double*> (m_triplet->x))[k];
262 1264003 : break;
263 : }
264 : }
265 : }
266 2679316 : return val;
267 : }
268 :
269 : } /* END GET */
270 :
271 756262 : void Matrix::set(size_t i, size_t j, double v) {
272 : //LCOV_EXCL_START
273 : if (!indexWithinBounds(i, j)) {
274 : throw std::out_of_range("Index out of range!");
275 : }
276 : //LCOV_EXCL_STOP
277 756261 : if (m_type == MATRIX_DENSE) {
278 393947 : if (m_transpose) {
279 6061 : m_data[j + i * m_ncols] = v;
280 : } else {
281 387886 : m_data[i + j * m_nrows] = v;
282 : }
283 362314 : } else if (m_type == MATRIX_DIAGONAL && i == j) {
284 821 : m_data[i] = v;
285 361493 : } else if (m_type == MATRIX_LOWERTR) {
286 1502 : m_data[i + m_nrows * j - j * (j + 1) / 2] = v;
287 359991 : } else if (m_type == MATRIX_SYMMETRIC) { /* sets A(i,j) = A(j,i) = v */
288 315 : int i_ = std::max(i, j);
289 315 : int j_ = std::min(i, j);
290 315 : m_data[i_ + m_nrows * j_ - j_ * (j_ + 1) / 2] = v;
291 359676 : } else if (m_type == MATRIX_SPARSE) {
292 359675 : if (m_triplet == NULL) { /* Create triplets if they don't exist */
293 0 : _createTriplet();
294 : }
295 359675 : if (m_triplet->nnz == m_triplet->nzmax) { /* max NNZ exceeded */
296 335 : cholmod_reallocate_triplet(m_triplet->nzmax + 1, m_triplet, Matrix::cholmod_handle());
297 : }
298 :
299 359675 : int k_found = -1;
300 11963141 : for (size_t s = 0; s < m_triplet->nnz; s++) {
301 11603522 : if (i == (static_cast<int*> (m_triplet->i))[s] && j == (static_cast<int*> (m_triplet->j))[s]) {
302 56 : k_found = s;
303 56 : break;
304 : }
305 : }
306 :
307 359675 : if (k_found == -1) {
308 359619 : (static_cast<int*> (m_triplet->i))[m_triplet->nnz] = i;
309 359619 : (static_cast<int*> (m_triplet->j))[m_triplet->nnz] = j;
310 359619 : (static_cast<double*> (m_triplet->x))[m_triplet->nnz] = v;
311 359619 : (m_triplet->nnz)++;
312 : } else {
313 56 : (static_cast<int*> (m_triplet->i))[k_found] = i;
314 56 : (static_cast<int*> (m_triplet->j))[k_found] = j;
315 56 : (static_cast<double*> (m_triplet->x))[k_found] = v;
316 : }
317 : /* Invalidate alternative sparse representations */
318 359675 : if (m_sparse != NULL) {
319 2 : cholmod_free_sparse(&m_sparse, Matrix::cholmod_handle());
320 : }
321 359675 : if (m_dense != NULL) {
322 0 : cholmod_free_dense(&m_dense, Matrix::cholmod_handle());
323 : }
324 359675 : m_sparse = NULL;
325 359675 : m_dense = NULL;
326 : } else {
327 : //LCOV_EXCL_START
328 : throw std::invalid_argument("Illegal operation");
329 : //LCOV_EXCL_STOP
330 : }
331 :
332 756260 : }
333 :
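/*
 * Reviewer note: as written, every branch of norm_fro_sq() returns the
 * Frobenius norm itself (cblas_dnrm2 returns sqrt(sum of squares) and the
 * symmetric branch returns sqrt(t)), not its square, despite the name.
 * Callers that need the squared norm should square the returned value.
 */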
334 81142 : double Matrix::norm_fro_sq() {
335 81142 : if (m_type == Matrix::MATRIX_DENSE
336 0 : || m_type == Matrix::MATRIX_DIAGONAL
337 0 : || m_type == Matrix::MATRIX_LOWERTR) {
338 81142 : return cblas_dnrm2(m_dataLength, m_data, 1);
339 0 : } else if (m_type == Matrix::MATRIX_SYMMETRIC) {
340 0 : double t = 0.0;
341 0 : for (size_t i = 0; i < m_nrows; i++) {
342 : /* do for all i > j */
343 0 : for (size_t j = 0; j < i; j++) {
344 0 : t += 2.0 * std::pow(m_data[i + m_nrows * j - j * (j + 1) / 2], 2);
345 : }
346 : /* and now for j = i */
347 0 : t += std::pow(m_data[i + m_nrows * i - i * (i + 1) / 2], 2);
348 : }
349 0 : return std::sqrt(t);
350 : } else {
351 0 : double * sparse_data = static_cast<double*> (m_triplet->x);
352 0 : return cblas_dnrm2(m_triplet->nnz, sparse_data, 1);
353 : }
354 : }
355 :
356 258 : double Matrix::quadFromTriplet(const Matrix& x) const {
357 258 : double r = 0.0;
358 11593 : for (size_t k = 0; k < m_triplet->nnz; k++) {
359 22670 : r += x.get(static_cast<int*> (m_triplet->i)[k], 0) *
360 11335 : x.get(static_cast<int*> (m_triplet->j)[k], 0) *
361 11335 : (static_cast<double*> (m_triplet->x))[k];
362 : }
363 258 : return r;
364 : }
365 :
366 285 : double Matrix::quad(Matrix & x) const {
367 : //LCOV_EXCL_START
368 : if (!x.isColumnVector()) {
369 : throw std::invalid_argument("Method `quadratic` can only be applied to vectors!");
370 : }
371 : if (getNcols() != getNrows()) {
372 : throw std::invalid_argument("Method `quadratic` can only be applied on square matrices!");
373 : }
374 : if (x.getNrows() != m_ncols) {
375 : throw std::invalid_argument("The argument of quad(Matrix&) is not of appropriate dimension.");
376 : }
377 : //LCOV_EXCL_STOP
378 282 : double result = 0.0;
379 :
380 282 : if (MATRIX_DENSE == m_type || MATRIX_LOWERTR == m_type) { /* DENSE or LOWER TRIANGULAR */
381 113 : for (size_t j = 0; j < m_ncols; j++) {
382 832 : for (size_t i = 0; i < m_nrows; i++) {
383 734 : result += x[i] * get(i, j) * x[j]; /* get() handles the transpose flag and the packed lower-triangular storage */
384 : }
385 15 : }
386 267 : } else if (MATRIX_DIAGONAL == m_type) { /* DIAGONAL */
387 27 : for (size_t i = 0; i < m_nrows; i++) {
388 24 : result += x[i] * x[i] * m_data[i];
389 : }
390 264 : } else if (MATRIX_SYMMETRIC == m_type) { /* SYMMETRIC */
391 66 : for (size_t i = 0; i < m_nrows; i++) {
392 330 : for (size_t j = 0; j < i; j++) {
393 270 : result += 2 * x[i] * get(i, j) * x[j];
394 : }
395 60 : result += x[i] * get(i, i) * x[i];
396 : }
397 258 : } else if (MATRIX_SPARSE == m_type) { /* SPARSE */
398 258 : if (m_triplet != NULL) {
399 258 : result = quadFromTriplet(x);
400 : } else {
401 : /* \todo Implement quadFromSparse */
402 : //LCOV_EXCL_START
403 : throw std::logic_error("Quad on sparse matrix - no triplets found (not implemented yet)");
404 : //LCOV_EXCL_STOP
405 : }
406 : }
407 282 : result /= 2.0;
408 282 : return result;
409 : }
410 :
411 9 : double Matrix::quad(Matrix& x, Matrix & q) const {
412 : //LCOV_EXCL_START
413 : if (!x.isColumnVector()) {
414 : throw std::invalid_argument("Method `quadratic` can only be applied to vectors!");
415 : }
416 : if (!q.isColumnVector()) {
417 : throw std::invalid_argument("Parameter q needs to be a column vector!");
418 : }
419 : if (getNcols() != getNrows()) {
420 : throw std::invalid_argument("Method `quadratic` can only be applied on square matrices!");
421 : }
422 : if (x.getNrows() != m_ncols) {
423 : throw std::invalid_argument("The argument of quad(Matrix&) is not of appropriate dimension.");
424 : }
425 : //LCOV_EXCL_STOP
426 : double t;
427 8 : Matrix r;
428 8 : r = q*x;
429 8 : assert(!r.isEmpty());
430 8 : t = r.get(0, 0) + quad(x); /* q*x + (1/2) x'*Q*x */
431 :
432 8 : return t;
433 : }
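/*
 * Usage sketch (illustrative): for a square matrix Q and column vectors x, q
 * of matching dimension,
 *
 *   double f  = Q.quad(x);    // (1/2) * x' * Q * x
 *   double fq = Q.quad(x, q); // q' * x + (1/2) * x' * Q * x
 */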
434 :
435 2 : void Matrix::plusop() {
436 2 : if (m_type != Matrix::MATRIX_SPARSE) {
437 50001 : for (size_t i = 0; i < length(); i++) {
438 50000 : if (m_data[i] < 0) {
439 37444 : m_data[i] = 0.0;
440 : }
441 : }
442 : } else {
443 1 : if (m_triplet != NULL) {
444 201 : for (size_t k = 0; k < m_triplet->nnz; k++) {
445 200 : double * val = (static_cast<double*> (m_triplet->x)) + k;
446 200 : if (*val < 0) {
447 153 : *val = 0.0;
448 : }
449 : }
450 0 : } else if (m_sparse != NULL) {
451 0 : for (size_t j = 0; j < m_ncols; j++) {
452 0 : int p = (static_cast<int*> (m_sparse->p))[j];
453 0 : int pend = (m_sparse->packed == 1)
454 0 : ? ((static_cast<int*> (m_sparse->p))[j + 1])
455 0 : : p + (static_cast<int*> (m_sparse->nz))[j];
456 0 : for (; p < pend; p++) {
457 0 : double * val = (static_cast<double*> (m_sparse->x)) + p;
458 0 : if (*val < 0) {
459 0 : *val = 0.0;
460 : }
461 : }
462 : }
463 : }
464 : }
465 2 : }
466 :
467 3 : void Matrix::plusop(Matrix* mat) const {
468 3 : if (m_type != Matrix::MATRIX_SPARSE) {
469 3 : if (length() != mat->length()) {
470 0 : throw std::invalid_argument("Input matrix allocation/size error");
471 : }
472 10023 : for (size_t i = 0; i < length(); i++) {
473 10020 : mat->m_data[i] = m_data[i] < 0.0 ? 0.0 : m_data[i];
474 : }
475 : } else {
476 0 : throw std::logic_error("Not implemented yet");
477 : }
478 3 : }
479 :
480 : /********* OPERATORS ************/
481 112714 : bool Matrix::operator==(const Matrix & right) const {
482 112714 : const double tol = 1e-9;
483 225428 : bool result = (m_type == right.m_type) &&
484 225428 : (m_ncols == right.m_ncols) &&
485 225428 : (m_nrows == right.m_nrows);
486 312668 : for (unsigned int i = 0; i < m_nrows; i++) {
487 1040572 : for (size_t j = 0; j < m_ncols; j++) {
488 840618 : result = result && (std::abs(get(i, j) - right.get(i, j)) < tol);
489 : }
490 : }
491 112714 : return result;
492 : }
493 :
494 : //LCOV_EXCL_START
495 :
496 : std::ostream& operator<<(std::ostream& os, const Matrix & obj) {
497 : os << "\nMatrix " << obj.m_nrows << "x" << obj.m_ncols << std::endl;
498 : if (obj.m_transpose) {
499 : os << "Stored as transpose : YES\n";
500 : }
501 : const char * const types[] = {"Dense", "Sparse", "Diagonal", "Lower Triangular", "Symmetric"};
502 : os << "Type: " << types[obj.m_type] << std::endl;
503 : if (obj.m_type == Matrix::MATRIX_SPARSE && obj.m_triplet == NULL && obj.m_sparse != NULL) {
504 : os << "Storage type: Packed Sparse" << std::endl;
505 : for (size_t j = 0; j < obj.m_ncols; j++) {
506 : int p = (static_cast<int*> (obj.m_sparse->p))[j];
507 : int pend = (obj.m_sparse->packed == 1)
508 : ? ((static_cast<int*> (obj.m_sparse->p))[j + 1])
509 : : p + (static_cast<int*> (obj.m_sparse->nz))[j];
510 : for (; p < pend; p++) {
511 : os << "(" << (static_cast<int*> (obj.m_sparse->i))[p] << "," << j << ") : "
512 : << std::setw(9) << std::setprecision(4) << (static_cast<double*> (obj.m_sparse->x))[p] << std::endl;
513 : }
514 : }
515 : return os;
516 : }
517 : for (size_t i = 0; i < obj.m_nrows; i++) {
518 : for (size_t j = 0; j < obj.m_ncols; j++) {
519 : if (obj.m_type == Matrix::MATRIX_SPARSE && obj.get(i, j) == 0) {
520 : os << std::setw(8) << "0";
521 : } else {
522 :
523 : os << std::setw(8) << std::setprecision(4) << obj.get(i, j);
524 : }
525 : }
526 : os << std::endl;
527 : }
528 : return os;
529 : }
530 : //LCOV_EXCL_STOP
531 :
532 4755202 : double &Matrix::operator[](size_t sub) const {
533 : //LCOV_EXCL_START
534 : // if (sub >= length()) {
535 : // throw std::out_of_range("Exception: Index out of range for Matrix");
536 : // }
537 : //LCOV_EXCL_STOP
538 4755202 : return m_data[sub];
539 : }
540 :
541 52832 : inline void Matrix::_addIJ(size_t i, size_t j, double a) {
542 52832 : set(i, j, get(i, j) + a); // A(i,j) += a
543 52832 : }
544 :
545 139542 : inline void Matrix::_addIJ(size_t i, size_t j, double a, double gamma) {
546 139542 : set(i, j, gamma * get(i, j) + a); // A(i,j) = gamma * A(i,j) + a
547 139542 : }
548 :
549 : inline void Matrix::vectorAdd(size_t len, double* pV1, const double* pV2) {
550 : #ifdef USE_LIBS
551 : cblas_daxpy(len, 1.0, pV2, 1, pV1, 1); // data = data + right.data
552 : #else
553 : for (int i = 0; i < len; i++)
554 : pV1[i] += pV2[i];
555 : #endif
556 : }
557 :
558 107792 : Matrix& Matrix::operator+=(Matrix & right) {
559 : //LCOV_EXCL_START
560 : if (m_ncols != right.m_ncols || m_nrows != right.m_nrows) {
561 : throw std::invalid_argument("Incompatible dimensions while using +=!");
562 : }
563 : //LCOV_EXCL_STOP
564 :
565 107791 : if (&right == this) {
566 0 : *this *= 2.0;
567 0 : return *this;
568 : }
569 :
570 107791 : const double alpha = 1.0;
571 107791 : const double gamma = 1.0;
572 107791 : add(*this, alpha, right, gamma);
573 107791 : return *this;
574 : }
575 :
576 81336 : Matrix & Matrix::operator-=(Matrix & right) {
577 : //LCOV_EXCL_START
578 : if (m_ncols != right.m_ncols || m_nrows != right.m_nrows) {
579 : throw std::invalid_argument("Incompatible dimensions while using -=!");
580 : }
581 : //LCOV_EXCL_STOP
582 :
583 81335 : const double alpha = -1.0;
584 81335 : const double gamma = 1.0;
585 81335 : add(*this, alpha, right, gamma);
586 81335 : return *this;
587 : }
588 :
589 24423 : Matrix Matrix::operator+(Matrix & right) const {
590 24423 : if (this->getNrows() != right.getNrows() || this->getNcols() != right.getNcols()) {
591 1 : throw std::invalid_argument("Addition of matrices of incompatible dimensions!");
592 : }
593 24422 : Matrix result(*this); // Make a copy of myself.
594 24422 : result += right;
595 :
596 24422 : return result;
597 : }
598 :
599 81332 : Matrix Matrix::operator-(Matrix & right) const {
600 81332 : if (this->getNrows() != right.getNrows() || this->getNcols() != right.getNcols()) {
601 1 : throw std::invalid_argument("Subtraction of matrices of incompatible dimensions!");
602 : }
603 81331 : Matrix result(*this); // Make a copy of myself.
604 81331 : result -= right;
605 :
606 81331 : return result;
607 : }
608 :
609 160436 : Matrix Matrix::operator*(Matrix & right) {
610 481674 : if (!(getType() == Matrix::MATRIX_SPARSE && right.getType() == Matrix::MATRIX_SPARSE) &&
611 387328 : isColumnVector() && right.isColumnVector() && length() == right.length()) {
612 31253 : double t = 0.0;
613 : // multiplication of two column vectors = dot product
614 31253 : Matrix r(1, 1);
615 353248 : for (size_t i = 0; i < m_nrows * m_ncols; i++) {
616 321995 : t += m_data[i] * right.m_data[i];
617 : }
618 31253 : r[0] = t;
619 31253 : return r;
620 : }
621 129183 : if (!(isColumnVector() && right.isColumnVector()) && (m_ncols != right.m_nrows)) {
622 1 : std::ostringstream oss;
623 1 : oss << "Matrix::operator* : (*this) (" << getNrows() << "x" << getNcols()
624 2 : << ") and RHS (" << right.getNrows() << "x" << right.getNcols()
625 1 : << ") do not have compatible dimensions";
626 1 : throw std::invalid_argument(oss.str().c_str());
627 : }
628 129182 : Matrix result;
629 129182 : switch (m_type) {
630 : case MATRIX_DENSE: // Left-hand side matrix is dense
631 127578 : result = multiplyLeftDense(right);
632 127578 : break;
633 : case MATRIX_DIAGONAL:
634 5 : result = multiplyLeftDiagonal(right);
635 5 : break;
636 : case MATRIX_SYMMETRIC:
637 28 : result = multiplyLeftSymmetric(right);
638 28 : break;
639 : case MATRIX_SPARSE:
640 1571 : result = multiplyLeftSparse(right);
641 1571 : break;
642 : case MATRIX_LOWERTR:
643 0 : throw std::logic_error("Lower triangular multiplication not implemented yet");
644 : default:
645 0 : throw std::logic_error("unsupported");
646 : }
647 129182 : return result;
648 : }
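/*
 * Reviewer note on operator* (illustrative sketch): when both operands are
 * column vectors of equal length and not both sparse, the product is computed
 * as a dot product and returned as a 1x1 matrix, e.g.
 *
 *   Matrix x(3, 1, xdata);   // xdata, ydata: arrays of 3 doubles (assumed)
 *   Matrix y(3, 1, ydata);
 *   Matrix d = x * y;        // d is 1x1 and d[0] == x'y
 *
 * In every other case the usual (m x k) * (k x n) dimension rule is enforced
 * and the work is dispatched on the type of the left-hand side.
 */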
649 :
650 971739 : Matrix & Matrix::operator=(const Matrix & right) {
651 : // Check for self-assignment!
652 971739 : if (this == &right) {// Same object?
653 5 : return *this; // Yes, so skip assignment, and just return *this.
654 : }
655 :
656 : /*
657 : * Copy basic properties
658 : */
659 971734 : m_delete_data = (right.m_type != Matrix::MATRIX_SPARSE);
660 971734 : m_ncols = right.m_ncols;
661 971734 : m_nrows = right.m_nrows;
662 971734 : m_type = right.m_type;
663 971734 : m_triplet = NULL;
664 971734 : m_sparse = NULL;
665 971734 : m_dense = NULL;
666 971734 : m_transpose = right.m_transpose;
667 971734 : m_sparseStorageType = right.m_sparseStorageType;
668 :
669 : /*
670 : * Shallow copies remain shallow - this assignment operator respects shallowness.
671 : */
672 971734 : if (!right.m_delete_data && right.m_type != Matrix::MATRIX_SPARSE) {
673 128 : m_delete_data = right.m_delete_data;
674 128 : m_data = right.m_data;
675 128 : m_dataLength = right.m_dataLength;
676 128 : return *this;
677 : }
678 :
679 : /*
680 : * copy m_data only if
681 : * (i) the matrix is not sparse
682 : * (ii) the matrix is not shallow
683 : */
684 971606 : if (right.m_type != MATRIX_SPARSE && right.m_delete_data) {
685 : /* If no data has been allocated, allocate using `new` */
686 968764 : if (m_data == NULL || m_dataLength == 0) {
687 127801 : m_data = new double[right.m_dataLength];
688 : } else {
689 : /* If already more memory is allocated in this object */
690 840963 : if (m_dataLength == right.m_dataLength) {
691 :
692 : } else {
693 43200 : m_data = static_cast<double *> (realloc(m_data, right.m_dataLength * sizeof (double)));
694 : }
695 : }
696 968764 : m_delete_data = true;
697 : }
698 :
699 971606 : m_dataLength = right.m_dataLength;
700 :
701 : // <editor-fold defaultstate="collapsed" desc="Sparse Matrices Only">
702 971606 : if (m_type == MATRIX_SPARSE) {
703 2842 : if (right.m_triplet != NULL) {
704 1637 : m_triplet = cholmod_copy_triplet(right.m_triplet, Matrix::cholmod_handle());
705 : }
706 2842 : if (right.m_sparse != NULL) {
707 1231 : m_sparse = cholmod_copy_sparse(right.m_sparse, Matrix::cholmod_handle());
708 : }
709 2842 : if (right.m_dense != NULL) {
710 0 : m_dense = cholmod_copy_dense(right.m_dense, Matrix::cholmod_handle());
711 : }
712 : }// </editor-fold>
713 :
714 971606 : if (Matrix::MATRIX_SPARSE != right.getType() && right.m_data != NULL) {
715 : #ifdef USE_LIBS
716 968764 : cblas_dcopy(m_dataLength, right.m_data, 1, m_data, 1);
717 : #else
718 : for (int i = 0; i < m_dataLength; i++)
719 : m_data[i] = right[i];
720 : #endif
721 : }
722 971606 : return *this;
723 : }
724 :
725 : /********* PRIVATE METHODS ************/
726 103 : void Matrix::domm(const Matrix &right, Matrix & result) const {
727 : // multiply with LHS being dense
728 : double t;
729 716 : for (size_t j = 0; j < right.m_ncols; j++) {
730 5470 : for (size_t i = 0; i < m_nrows; i++) {
731 4857 : t = 0.0;
732 33910 : for (size_t k = 0; k < m_ncols; k++) {
733 29053 : if (!(right.getType() == MATRIX_LOWERTR && k < j)) {
734 29029 : t += get(i, k) * right.get(k, j);
735 : }
736 : }
737 4857 : result.set(i, j, t);
738 : }
739 : }
740 103 : }
741 :
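/* Reviewer note: this overload computes C := gamma * C + alpha * A * B with a
 * naive triple loop (via _addIJ); it is the fallback used by
 * multiply_helper_left_dense when B is symmetric or lower-triangular. */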
742 100 : void Matrix::domm(Matrix& C, double alpha, Matrix& A, Matrix& B, double gamma) {
743 : // multiply with A being dense
744 : double t;
745 700 : for (size_t j = 0; j < B.m_ncols; j++) {
746 5400 : for (size_t i = 0; i < C.m_nrows; i++) {
747 4800 : t = 0.0;
748 33600 : for (size_t k = 0; k < A.m_ncols; k++) { /* k runs over the inner dimension (columns of A) */
749 28800 : if (!(B.getType() == MATRIX_LOWERTR && k < j)) {
750 28800 : t += A.get(i, k) * B.get(k, j);
751 : }
752 : }
753 4800 : C._addIJ(i, j, alpha*t, gamma);
754 : }
755 : }
756 100 : }
757 :
758 127578 : Matrix Matrix::multiplyLeftDense(const Matrix & right) const {
759 127578 : if (MATRIX_DENSE == right.m_type) { // RHS is also dense
760 127170 : Matrix result(m_nrows, right.m_ncols);
761 : #ifdef USE_LIBS
762 : cblas_dgemm(CblasColMajor,
763 : m_transpose ? CblasTrans : CblasNoTrans,
764 : right.m_transpose ? CblasTrans : CblasNoTrans,
765 : m_nrows, right.m_ncols, m_ncols, 1.0, m_data, m_transpose ? m_ncols : m_nrows,
766 : right.m_data, right.m_transpose ? right.m_ncols : right.m_nrows, 0.0,
767 127170 : result.m_data, m_nrows);
768 : #else
769 : domm(right, result);
770 : #endif
771 127170 : return result;
772 408 : } else if (MATRIX_DIAGONAL == right.m_type) { // {DENSE} * {DIAGONAL} = {DENSE} - RHS is diagonal
773 304 : Matrix result(getNrows(), getNcols());
774 2139 : for (size_t j = 0; j < getNcols(); j++) {
775 16596 : for (size_t i = 0; i < getNrows(); i++) {
776 14761 : result.set(i, j, (!m_transpose ? m_data[i + j * m_nrows] : m_data[j + i * m_ncols]) * right.m_data[j]);
777 : }
778 : }
779 304 : return result;
780 104 : } else if (MATRIX_SYMMETRIC == right.m_type || MATRIX_LOWERTR == right.m_type) {
781 102 : Matrix result(m_nrows, right.m_ncols, Matrix::MATRIX_DENSE);
782 102 : domm(right, result);
783 102 : return result;
784 : } else { /* {DENSE} * {SPARSE} = */
785 : /*
786 : * Trick: For D: dense, S: sparse it is
787 : * D * S = (S' * D')'
788 : */
789 2 : Matrix tempSparse(right);
790 2 : (const_cast<Matrix&> (*this)).transpose(); /* D' */
791 2 : tempSparse.transpose(); /* S' */
792 4 : Matrix r = tempSparse * (const_cast<Matrix&> (*this)); /* r = S' * D' */
793 2 : r.transpose(); /* r' */
794 2 : (const_cast<Matrix&> (*this)).transpose(); /* D */
795 4 : return r;
796 : }
797 : }
798 :
799 28 : Matrix Matrix::multiplyLeftSymmetric(const Matrix & right) const {
800 : // multiply when the LHS is symmetric
801 28 : Matrix result(m_nrows, right.m_ncols);
802 28 : if (right.isColumnVector()) {
803 : #ifdef USE_LIBS
804 : cblas_dspmv(CblasColMajor,
805 : CblasLower,
806 : m_nrows, 1.0, m_data,
807 : right.m_data, 1,
808 27 : 0.0, result.m_data, 1);
809 27 : return result;
810 : #endif
811 : }
812 1 : domm(right, result);
813 1 : return result;
814 : }
815 :
816 5 : Matrix Matrix::multiplyLeftDiagonal(const Matrix & right) const {
817 : // multiply when the LHS is diagonal
818 5 : Matrix result(m_nrows, right.m_ncols, right.m_type);
819 51 : for (size_t i = 0; i < m_nrows; i++) {
820 46 : if (MATRIX_SYMMETRIC == right.m_type) {
821 65 : for (size_t j = i; j < right.m_ncols; j++) {
822 55 : result.set(i, j, m_data[i] * right.get(i, j));
823 : }
824 36 : } else if (MATRIX_DENSE == right.m_type) {
825 150 : for (size_t j = 0; j < right.m_ncols; j++) {
826 130 : result.set(i, j, m_data[i] * right.get(i, j));
827 : }
828 16 : } else if (MATRIX_DIAGONAL == right.m_type) {
829 10 : result.set(i, i, m_data[i] * right.m_data[i]);
830 6 : } else if (MATRIX_LOWERTR == right.m_type) {
831 27 : for (size_t j = 0; j <= i; j++) {
832 :
833 21 : result.set(i, j, m_data[i] * right.get(i, j));
834 : }
835 : }
836 : }
837 5 : return result;
838 : }
839 :
840 1571 : Matrix Matrix::multiplyLeftSparse(Matrix & right) {
841 1571 : if (right.m_type == MATRIX_SPARSE) {
842 : // RHS is sparse
843 1205 : if (m_sparse == NULL) {
844 904 : _createSparse();
845 : }
846 1205 : if (right.m_sparse == NULL) {
847 902 : right._createSparse();
848 : }
849 : cholmod_sparse *r;
850 : r = cholmod_ssmult(
851 1208 : (isColumnVector() && right.isColumnVector()) ? cholmod_transpose(m_sparse, 1, Matrix::cholmod_handle()) : m_sparse,
852 : right.m_sparse,
853 : 0,
854 : true,
855 : false,
856 2413 : Matrix::cholmod_handle());
857 1205 : Matrix result(true);
858 1205 : if (isColumnVector() && right.isColumnVector()) { /* Sparse-sparse dot product */
859 3 : result = Matrix(1, 1, Matrix::MATRIX_SPARSE);
860 : } else {
861 1202 : result = Matrix(m_nrows, right.m_ncols, Matrix::MATRIX_SPARSE);
862 : }
863 1205 : result.m_sparse = r;
864 1205 : result._createTriplet();
865 1205 : return result;
866 366 : } else if (right.m_type == MATRIX_DENSE) { /* SPARSE * DENSE */
867 : // RHS is dense
868 65 : Matrix result(getNrows(), right.getNcols());
869 :
870 65 : if (m_triplet != NULL && m_sparse == NULL)
871 56 : _createSparse();
872 :
873 65 : double alpha[2] = {1.0, 0.0};
874 65 : double beta[2] = {0.0, 0.0};
875 :
876 65 : if (right.m_dense == NULL) { /* Prepare right.m_dense */
877 : right.m_dense = cholmod_allocate_dense(
878 : right.getNrows(),
879 : right.getNcols(),
880 : right.getNrows(),
881 : CHOLMOD_REAL,
882 60 : Matrix::cholmod_handle());
883 :
884 60 : if (!right.m_transpose) {
885 8 : right.m_dense->x = right.m_data;
886 : } else {
887 : /* Store RHS as transpose into right.m_dense */
888 717 : for (size_t i = 0; i < right.getNrows(); i++) { /* SPARSE x DENSE' */
889 16990 : for (size_t j = 0; j < right.getNcols(); j++) {
890 16325 : (static_cast<double*> (right.m_dense->x))[i + j * right.getNrows()] =
891 16325 : right.get(i, j);
892 : }
893 : }
894 : }
895 : }
896 :
897 65 : bool dotProd = isColumnVector() && right.isColumnVector();
898 : result.m_dense = cholmod_allocate_dense(
899 : dotProd ? 1 : result.getNrows(),
900 : dotProd ? 1 : result.getNcols(),
901 : dotProd ? 1 : result.getNrows(),
902 : CHOLMOD_REAL,
903 65 : Matrix::cholmod_handle());
904 65 : if (m_transpose) {
905 4 : this->transpose();
906 : }
907 : cholmod_sdmult(
908 : m_sparse,
909 : dotProd ? true : m_transpose,
910 : alpha,
911 : beta,
912 : right.m_dense,
913 : result.m_dense,
914 : Matrix::cholmod_handle()
915 65 : );
916 65 : if (m_transpose) {
917 0 : this->transpose();
918 : }
919 31614 : for (size_t k = 0; k < result.length(); k++) {
920 31549 : result.m_data[k] = (static_cast<double*> (result.m_dense->x))[k];
921 : }
922 65 : return result;
923 301 : } else if (right.m_type == MATRIX_DIAGONAL) { // SPARSE * DIAGONAL = SPARSE
924 301 : Matrix result(*this); // COPY [result := *this, the sparse LHS]
925 12326 : for (size_t k = 0; k < result.m_triplet->nnz; k++) {
926 12025 : int j_ = (static_cast<int*> (result.m_triplet->j))[k];
927 12025 : (static_cast<double*> (result.m_triplet->x))[k] *= right.m_data[j_];
928 : }
929 301 : return result;
930 : } else {
931 : //LCOV_EXCL_START
932 : throw std::invalid_argument("SPARSE * {SYMMETRIC/LOWER/UPPER TRIANGULAR}: not supported");
933 : //LCOV_EXCL_STOP
934 : }
935 : }
936 :
937 756262 : bool Matrix::indexWithinBounds(size_t i, size_t j) const {
938 :
939 756262 : return (i < m_nrows && j < m_ncols) && !(m_type == MATRIX_LOWERTR && i < j);
940 : }
941 :
942 1912386 : Matrix::MatrixType Matrix::getType() const {
943 :
944 1912386 : return m_type;
945 : }
946 :
947 628377 : void Matrix::init(size_t nr, size_t nc, MatrixType mType) {
948 628377 : this -> m_transpose = false;
949 628377 : this -> m_ncols = nc;
950 628377 : this -> m_nrows = nr;
951 628377 : this -> m_type = mType;
952 628377 : this -> m_data = NULL;
953 628377 : this -> m_delete_data = true;
954 628377 : this -> m_triplet = NULL;
955 628377 : this -> m_sparse = NULL;
956 628377 : this -> m_dense = NULL;
957 628377 : switch (m_type) {
958 : case MATRIX_DENSE:
959 619664 : m_dataLength = nc * nr;
960 619664 : m_data = new double[m_dataLength]();
961 619664 : break;
962 : case MATRIX_DIAGONAL:
963 633 : if (nc != nr) {
964 : //LCOV_EXCL_START
965 : throw std::invalid_argument("Diagonal matrices must be square!!!");
966 : //LCOV_EXCL_STOP
967 : }
968 632 : m_dataLength = nc;
969 632 : m_data = new double[m_dataLength]();
970 632 : break;
971 : case MATRIX_LOWERTR:
972 : case MATRIX_SYMMETRIC:
973 194 : if (nc != nr) {
974 : //LCOV_EXCL_START
975 : throw std::invalid_argument("Lower triangular and symmetric matrices must be square!!!");
976 : //LCOV_EXCL_STOP
977 : }
978 192 : m_dataLength = nc * (nc + 1) / 2;
979 192 : m_data = new double[m_dataLength]();
980 192 : break;
981 : case MATRIX_SPARSE:
982 7886 : m_data = NULL;
983 7886 : break;
984 : default:
985 0 : throw std::logic_error("unsupported");
986 : }
987 :
988 628374 : }
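/*
 * Reviewer note on the storage sizes chosen in init() (sketch):
 *   MATRIX_DENSE                       : nr * nc doubles
 *   MATRIX_DIAGONAL                    : nc doubles (the diagonal only)
 *   MATRIX_LOWERTR / MATRIX_SYMMETRIC  : nc * (nc + 1) / 2 doubles (packed);
 *                                        e.g. a 4x4 symmetric matrix stores
 *                                        4 * 5 / 2 = 10 values
 *   MATRIX_SPARSE                      : no dense buffer; data live in CHOLMOD
 *                                        structures built on demand by
 *                                        _createTriplet()/_createSparse()
 */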
989 :
990 15219 : void Matrix::_createSparse() {
991 15219 : if (m_triplet != NULL) { // from triplets
992 13713 : m_sparse = cholmod_triplet_to_sparse(m_triplet, m_triplet->nzmax, Matrix::cholmod_handle());
993 13713 : return;
994 : }
995 1506 : if (m_dense != NULL) { // from dense
996 0 : m_sparse = cholmod_dense_to_sparse(m_dense, true, Matrix::cholmod_handle());
997 0 : return;
998 : }
999 : }
1000 :
1001 5752 : void Matrix::_createTriplet() {
1002 5752 : _createSparse();
1003 5752 : if (m_sparse != NULL) { /* make triplets from sparse */
1004 5752 : m_triplet = cholmod_sparse_to_triplet(m_sparse, Matrix::cholmod_handle());
1005 : }
1006 5752 : }
1007 :
1008 54 : bool Matrix::isSymmetric() const {
1009 75 : return (m_nrows == m_ncols) && ((Matrix::MATRIX_SYMMETRIC == m_type)
1010 19 : || (Matrix::MATRIX_SPARSE == m_type && m_triplet != NULL && m_triplet->stype != 0)
1011 73 : || (Matrix::MATRIX_DIAGONAL == m_type));
1012 : }
1013 :
1014 7733 : Matrix& operator*=(Matrix& obj, double alpha) {
1015 7733 : if (obj.m_type != Matrix::MATRIX_SPARSE) {
1016 4131 : assert(obj.m_data != NULL);
1017 4131 : cblas_dscal(obj.m_dataLength, alpha, obj.m_data, 1);
1018 : } else {
1019 3602 : obj._createTriplet();
1020 3602 : obj.m_sparse = NULL;
1021 3602 : obj.m_dense = NULL;
1022 272693 : for (size_t k = 0; k < obj.m_triplet->nnz; k++) {
1023 269091 : (static_cast<double*> (obj.m_triplet->x))[k] *= alpha;
1024 : }
1025 : }
1026 7733 : return obj;
1027 : }
1028 :
1029 16 : Matrix operator*(double alpha, Matrix& obj) {
1030 16 : Matrix M(obj);
1031 16 : M *= alpha;
1032 16 : return (M);
1033 : }
1034 :
1035 0 : std::string Matrix::getTypeString() const {
1036 : //LCOV_EXCL_START
1037 : const char names[5][10] = {
1038 : "dense",
1039 : "sparse",
1040 : "diagonal",
1041 : "lower",
1042 : "symmetric"
1043 : };
1044 :
1045 : int i = static_cast<int> (getType());
1046 : std::string s = std::string(names[i]);
1047 : return s;
1048 : //LCOV_EXCL_STOP
1049 : }
1050 :
1051 219513 : Matrix Matrix::submatrixCopy(size_t row_start, size_t row_end, size_t col_start, size_t col_end) {
1052 : /* DONE: Fully tested */
1053 : //LCOV_EXCL_START
1054 : if (row_end < row_start || col_end < col_start) {
1055 : throw std::out_of_range("Matrix::submatrixCopy:: start > end is not allowed");
1056 : }
1057 : if (row_end > m_nrows) {
1058 : throw std::out_of_range("Matrix::submatrixCopy:: row_end > total number of rows");
1059 : }
1060 : if (col_end > m_ncols) {
1061 : throw std::out_of_range("Matrix::submatrixCopy:: col_end > total number of columns");
1062 : }
1063 : //LCOV_EXCL_STOP
1064 219513 : size_t rows = row_end - row_start + 1;
1065 219513 : size_t cols = col_end - col_start + 1;
1066 :
1067 219513 : Matrix M(rows, cols, m_type);
1068 219513 : if (m_type == Matrix::MATRIX_DENSE) {
1069 : /*
1070 : * void dlacpy_(
1071 : * char* uplo,
1072 : * lapack_int* m,
1073 : * lapack_int* n,
1074 : * const double* a,
1075 : * lapack_int* lda,
1076 : * double* b,
1077 : * lapack_int* ldb );
1078 : *
1079 : */
1080 : dlacpy_(const_cast<char*> ("A"),
1081 : reinterpret_cast<int*> (m_transpose ? &cols : &rows),
1082 : reinterpret_cast<int*> (m_transpose ? &rows : &cols),
1083 439024 : m_data + (m_transpose ? row_start * m_ncols + col_start : row_start + col_start * m_nrows),
1084 : const_cast<int*> (reinterpret_cast<const int*> (m_transpose ? &m_ncols : &m_nrows)),
1085 : M.m_data,
1086 658536 : reinterpret_cast<int*> (m_transpose ? &cols : &rows));
1087 219512 : M.m_transpose = m_transpose;
1088 1 : } else if (m_type == Matrix::MATRIX_SPARSE) {
1089 1 : int * rs = new int[rows];
1090 1 : int * cs = new int[cols];
1091 3 : for (size_t i = 0; i < rows; i++) { /* row indices row_start .. row_end */
1092 2 : rs[i] = row_start + i;
1093 : }
1094 3 : for (size_t j = 0; j < cols; j++) { /* column indices col_start .. col_end */
1095 2 : cs[j] = col_start + j;
1096 : }
1097 1 : this->_createSparse();
1098 : // int nnz = std::max(1, (int) (rows * cols / 20));
1099 : cholmod_sparse * sp; // = cholmod_allocate_sparse(rows, cols, nnz, 1, 1, m_sparse->stype, m_sparse->xtype, cholmod_handle());
1100 1 : sp = cholmod_submatrix(m_sparse, rs, rows, cs, cols, 1, 1, Matrix::cholmod_handle());
1101 1 : M.m_sparse = sp;
1102 1 : M._createTriplet();
1103 : } else {
1104 : //LCOV_EXCL_START
1105 : throw std::logic_error("Matrix::submatrixCopy is available only for MATRIX_DENSE and MATRIX_SPARSE type matrices.");
1106 : //LCOV_EXCL_STOP
1107 : }
1108 219513 : return M;
1109 : }
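/*
 * Usage sketch (illustrative): the bounds are inclusive, so
 *
 *   Matrix B = A.submatrixCopy(1, 3, 0, 2);
 *
 * returns the 3x3 block of A made of rows 1..3 and columns 0..2. Only dense
 * and sparse matrices are supported.
 */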
1110 :
1111 108405 : Matrix Matrix::multiplySubmatrix(
1112 : Matrix& right,
1113 : const size_t left_row_start,
1114 : const size_t left_row_end,
1115 : const size_t left_col_start,
1116 : const size_t left_col_end,
1117 : const size_t right_row_start,
1118 : const size_t right_row_end,
1119 : const size_t right_col_start,
1120 : const size_t right_col_end) {
1121 :
1122 108405 : if (MATRIX_DENSE == m_type && MATRIX_DENSE == right.m_type) {
1123 108405 : size_t left_cols = left_col_end - left_col_start + 1;
1124 108405 : size_t left_rows = left_row_end - left_row_start + 1;
1125 :
1126 108405 : size_t right_cols = right_col_end - right_col_start + 1;
1127 108405 : size_t right_rows = right_row_end - right_row_start + 1;
1128 :
1129 108405 : if (left_cols != right_rows) {
1130 : //LCOV_EXCL_START
1131 : throw std::invalid_argument("Dimension of sub-matrix mismatch (left_cols!=right_rows)");
1132 : //LCOV_EXCL_STOP
1133 : }
1134 :
1135 : size_t left_start_idx =
1136 : m_transpose
1137 44550 : ? left_row_start * m_ncols + left_col_start
1138 152955 : : left_row_start + left_col_start * m_nrows;
1139 : size_t right_start_idx =
1140 : right.m_transpose
1141 36936 : ? right_row_start * right.m_ncols + right_col_start
1142 145341 : : right_row_start + right_col_start * right.m_nrows;
1143 :
1144 108405 : Matrix result(left_rows, right_cols, MATRIX_DENSE);
1145 :
1146 : cblas_dgemm(
1147 : CblasColMajor,
1148 : m_transpose ? CblasTrans : CblasNoTrans,
1149 : right.m_transpose ? CblasTrans : CblasNoTrans,
1150 : left_rows,
1151 : right_cols,
1152 : left_cols,
1153 : 1.0,
1154 : m_data + left_start_idx,
1155 : m_transpose ? m_ncols : m_nrows,
1156 : right.m_data + right_start_idx,
1157 : right.m_transpose ? right.m_ncols : right.m_nrows,
1158 : 0.0,
1159 : result.m_data,
1160 108405 : left_rows);
1161 216810 : return result;
1162 :
1163 : } else {
1164 : //LCOV_EXCL_START
1165 : throw std::logic_error("Matrix::multiplySubmatrix is implemented only for dense matrix multiplication.");
1166 : //LCOV_EXCL_STOP
1167 : }
1168 : }
1169 :
1170 6 : void Matrix::toggle_diagonal() {
1171 6 : if (m_type != MATRIX_DENSE && m_type != MATRIX_DIAGONAL) { /* neither dense nor diagonal: unsupported. */
1172 1 : throw std::invalid_argument("Only dense vectors and diagonal matrices are supported");
1173 : }
1174 5 : if (!isColumnVector() && m_type != MATRIX_DIAGONAL) {
1175 1 : throw std::invalid_argument("Can only be applied to column vectors and diagonal matrices");
1176 : }
1177 4 : m_type = isColumnVector() ? MATRIX_DIAGONAL : MATRIX_DENSE;
1178 4 : if (isColumnVector()) {
1179 2 : m_ncols = m_nrows;
1180 : } else {
1181 2 : m_ncols = 1;
1182 : }
1183 4 : }
1184 :
1185 2582 : Matrix::Matrix(bool shallow) {
1186 2582 : m_data = NULL;
1187 2582 : m_delete_data = false;
1188 2582 : m_nrows = 0;
1189 2582 : m_ncols = 0;
1190 2582 : m_dataLength = 0;
1191 2582 : m_type = MATRIX_DENSE;
1192 2582 : m_transpose = false;
1193 2582 : m_triplet = NULL;
1194 2582 : m_sparse = NULL;
1195 2582 : m_dense = NULL;
1196 2582 : m_sparseStorageType = CHOLMOD_TYPE_TRIPLET;
1197 2582 : }
1198 :
1199 300995 : int Matrix::add(Matrix& C, double alpha, Matrix& A, double gamma) {
1200 : // A and C must have compatible dimensions
1201 300995 : if (C.getNcols() != A.getNcols() || C.getNrows() != A.getNrows()) {
1202 1 : throw std::invalid_argument("LHS and RHS do not have compatible dimensions");
1203 : }
1204 : // C := gamma * C + alpha * A
1205 : int status;
1206 300994 : switch (C.getType()) {
1207 : case MATRIX_DENSE: /* DENSE += ? */
1208 297053 : status = generic_add_helper_left_dense(C, alpha, A, gamma);
1209 297053 : break;
1210 : case MATRIX_SYMMETRIC: /* SYMMETRIC += ? */
1211 6 : status = generic_add_helper_left_symmetric(C, alpha, A, gamma);
1212 6 : break;
1213 : case MATRIX_LOWERTR: /* LOWER TRIANGULAR += ? */
1214 2 : status = generic_add_helper_left_lower_tri(C, alpha, A, gamma);
1215 2 : break;
1216 : case MATRIX_DIAGONAL: /* DIAGONAL += ? */
1217 1 : status = generic_add_helper_left_diagonal(C, alpha, A, gamma);
1218 1 : break;
1219 : case MATRIX_SPARSE: /* SPARSE += ? */
1220 3932 : status = generic_add_helper_left_sparse(C, alpha, A, gamma);
1221 3932 : break;
1222 : default:
1223 0 : status = ForBESUtils::STATUS_UNDEFINED_FUNCTION;
1224 0 : break;
1225 : }
1226 300994 : return status;
1227 : }
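/*
 * Reviewer note: add() implements the update C := gamma * C + alpha * A and
 * dispatches on the type of C. The in-place operators above are thin wrappers
 * around it (sketch):
 *
 *   C += A;   // add(C,  1.0, A, 1.0)
 *   C -= A;   // add(C, -1.0, A, 1.0)
 */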
1228 :
1229 297053 : int Matrix::generic_add_helper_left_dense(Matrix& C, double alpha, Matrix& A, double gamma) {
1230 : /* HEREAFTER, C is DENSE! */
1231 :
1232 297053 : Matrix::MatrixType type_of_A = A.getType();
1233 :
1234 297053 : bool is_gamma_one = (std::abs(gamma - 1.0) < std::numeric_limits<double>::epsilon());
1235 :
1236 297053 : if (type_of_A == MATRIX_DENSE && C.m_transpose == A.m_transpose) {
1237 296003 : if (is_gamma_one) {
1238 268925 : cblas_daxpy(A.length(), alpha, A.m_data, 1, C.m_data, 1);
1239 268925 : return ForBESUtils::STATUS_OK;
1240 : } else {
1241 229478 : for (size_t i = 0; i < A.length(); i++) {
1242 202400 : C.m_data[i] = (gamma * C.m_data[i]) + (alpha * A.m_data[i]);
1243 : }
1244 27078 : }
1245 1050 : } else if (type_of_A == MATRIX_DIAGONAL) { /* DENSE + DIAGONAL */
1246 4 : if (is_gamma_one) {
1247 116 : for (size_t i = 0; i < A.length(); i++)
1248 113 : C.m_data[i * (1 + C.m_nrows)] += (alpha * A.m_data[i]);
1249 : } else {
1250 : // C := gamma * C and C is dense [generic_add_helper_left_dense]
1251 1 : cblas_dscal(C.m_dataLength, gamma, C.m_data, 1);
1252 11 : for (size_t i = 0; i < C.getNcols(); i++)
1253 10 : C[i * (1 + C.m_nrows)] += (alpha * A.m_data[i]);
1254 : }
1255 1046 : } else if (type_of_A == MATRIX_LOWERTR) { /* DENSE + LOWER */
1256 : /* TODO This is to be tested!!! */
1257 92 : for (size_t i = 0; i < A.getNrows(); i++) {
1258 90 : if (!A.m_transpose) {
1259 3320 : for (size_t j = 0; j <= i; j++) {
1260 3240 : C._addIJ(i, j, alpha * A.get(i, j), gamma);
1261 : }
1262 : } else {
1263 65 : for (size_t j = i; j < A.m_ncols; j++) {
1264 55 : C._addIJ(i, j, alpha * A.get(i, j), gamma);
1265 : }
1266 : }
1267 : }
1268 1044 : } else if (type_of_A == MATRIX_SPARSE) {
1269 940 : A._createTriplet();
1270 940 : assert(A.m_triplet != NULL);
1271 940 : if (!is_gamma_one) {
1272 637 : C *= gamma;
1273 : }
1274 53772 : for (size_t k = 0; k < A.m_triplet->nnz; k++) {
1275 52832 : int i_ = (static_cast<int *> (A.m_triplet->i))[k];
1276 52832 : int j_ = (static_cast<int *> (A.m_triplet->j))[k];
1277 52832 : if (A.m_transpose) std::swap(i_, j_);
1278 52832 : C._addIJ(i_, j_, alpha * (static_cast<double *> (A.m_triplet->x))[k]);
1279 : }
1280 : } else { /* Symmetric and Dense+Dense' or Dense'+Dense (not of same transpose type) */
1281 1854 : for (size_t i = 0; i < A.getNrows(); i++) {
1282 56850 : for (size_t j = 0; j < A.getNcols(); j++) {
1283 55100 : C._addIJ(i, j, alpha * A.get(i, j), gamma);
1284 : }
1285 : }
1286 : }
1287 28128 : return ForBESUtils::STATUS_OK;
1288 : }
1289 :
1290 6 : int Matrix::generic_add_helper_left_symmetric(Matrix& C, double alpha, Matrix& A, double gamma) {
1291 6 : int status = ForBESUtils::STATUS_OK;
1292 6 : Matrix::MatrixType type_of_A = A.getType();
1293 6 : size_t ncols = A.getNcols();
1294 6 : size_t nrows = A.getNrows();
1295 6 : bool is_gamma_one = (std::abs(gamma - 1.0) < std::numeric_limits<double>::epsilon());
1296 6 : if (type_of_A == MATRIX_SYMMETRIC) {
1297 1 : if (is_gamma_one) {
1298 1 : cblas_daxpy(A.length(), alpha, A.m_data, 1, C.m_data, 1);
1299 : } else {
1300 0 : for (size_t i = 0; i < A.length(); i++) {
1301 0 : C.m_data[i] = (gamma * C.m_data[i]) + (alpha * A.m_data[i]);
1302 : }
1303 : }
1304 5 : } else if (type_of_A == MATRIX_DIAGONAL) { /* SYMMETRIC + DIAGONAL */
1305 902 : for (size_t i = 0; i < ncols; i++) {
1306 900 : size_t idx = i + C.m_nrows * i - i * (i + 1) / 2;
1307 900 : double val = C.m_data[idx];
1308 900 : C.m_data[idx] = gamma * val + alpha * A.m_data[i];
1309 : }
1310 3 : } else if (type_of_A == MATRIX_LOWERTR || type_of_A == MATRIX_DENSE) { /* SYMMETRIC + LOWER_TRI/DENSE = DENSE */
1311 2 : C.m_dataLength = ncols * nrows; /* SYMMETRIC + DENSE = DENSE */
1312 2 : double * newData = new double[C.m_dataLength]; // realloc data
1313 202 : for (size_t i = 0; i < nrows; i++) {
1314 20200 : for (size_t j = 0; j < ncols; j++) {
1315 20000 : newData[i + j * nrows] = gamma * C.get(i, j); // load data (recast into full storage format)
1316 20000 : if ((C.m_type == MATRIX_LOWERTR && type_of_A == MATRIX_DENSE) ? i >= j : true) {
1317 20000 : newData[i + j * nrows] += alpha * A.get(i, j); // add RHS elements
1318 : }
1319 : }
1320 : }
1321 : double * temp_data;
1322 2 : temp_data = static_cast<double *> (realloc(C.m_data, C.m_dataLength * sizeof (double))); // reallocate memory
1323 2 : if (temp_data == NULL) { /* could not allocate using realloc */
1324 0 : delete[] C.m_data;
1325 0 : if (newData != NULL) {
1326 0 : delete[] newData;
1327 : }
1328 0 : throw std::bad_alloc();
1329 2 : } else if (temp_data == C.m_data) { /* nothing happened */
1330 0 : temp_data = NULL;
1331 : } else { /* realloc succeeded */
1332 2 : C.m_data = temp_data; /* have m_data point to temp_data */
1333 2 : temp_data = NULL; /* nullify temp_data */
1334 : }
1335 2 : C.m_data = static_cast<double *> (memcpy(C.m_data, newData, C.m_dataLength * sizeof (double))); // copy newData to m_data
1336 2 : delete[] newData;
1337 2 : C.m_type = MATRIX_DENSE;
1338 2 : status = ForBESUtils::STATUS_HAD_TO_REALLOC;
1339 1 : } else if (type_of_A == MATRIX_SPARSE) { /* SYMMETRIC + SPARSE */
1340 1 : C.m_dataLength = ncols * nrows;
1341 1 : double * newData = new double[C.m_dataLength];
1342 1 : C.m_data = static_cast<double *> (realloc(C.m_data, C.m_dataLength * sizeof (double))); // reallocate memory
1343 :
1344 41 : for (size_t i = 0; i < nrows; i++) {
1345 1640 : for (size_t j = 0; j < ncols; j++) {
1346 1600 : newData[i + j * nrows] = C.get(i, j); /* restructure symmetric C data into dense */
1347 : }
1348 : }
1349 :
1350 1 : C.m_data = static_cast<double *> (memcpy(C.m_data, newData, C.m_dataLength * sizeof (double))); // copy newData to m_data
1351 :
1352 1 : A._createTriplet();
1353 1 : assert(A.m_triplet != NULL);
1354 :
1355 1 : C.m_type = MATRIX_DENSE;
1356 :
1357 91 : for (size_t k = 0; k < A.m_triplet->nnz; k++) {
1358 90 : int i_ = (static_cast<int*> (A.m_triplet->i))[k];
1359 90 : int j_ = (static_cast<int*> (A.m_triplet->j))[k];
1360 90 : C._addIJ(A.m_transpose ? j_ : i_, A.m_transpose ? i_ : j_, alpha * (static_cast<double*> (A.m_triplet->x))[k], gamma);
1361 : }
1362 :
1363 1 : delete[] newData;
1364 1 : status = ForBESUtils::STATUS_HAD_TO_REALLOC;
1365 : }
1366 6 : return status;
1367 : }
1368 :
1369 3932 : int Matrix::generic_add_helper_left_sparse(Matrix& C, double alpha, Matrix& A, double gamma) {
1370 3932 : int status = ForBESUtils::STATUS_OK;
1371 3932 : Matrix::MatrixType type_of_A = A.getType();
1372 3932 : size_t ncols = A.getNcols();
1373 3932 : size_t nrows = A.getNrows();
1374 :
1375 3932 : if (type_of_A == MATRIX_SPARSE) {
1376 3626 : double __gamma_t[1] = {gamma};
1377 3626 : double __alpha_t[1] = {alpha};
1378 :
1379 :
1380 : /*
1381 : * Comments on the following two blocks of code:
1382 : * IMPORTANT: READ ME
1383 : * Don't forget that m_triplet is not really transposed - see #transpose().
1384 : * When, at this point, m_sparse is NULL, the sparse data are stored in
1385 : * m_triplet.
1386 : *
1387 : * Recall that the code in #transpose() regarding sparse matrices is merely:
1388 : *
1389 : * if (m_type == MATRIX_SPARSE) {
1390 : * _createSparse();
1391 : * m_sparse = cholmod_transpose(m_sparse, 1, cholmod_handle());
1392 : * }
1393 : *
1394 : * It doesn't modify m_triplet whatsoever. Now, take a look at #get():
1395 : *
1396 : * double val = 0.0;
1397 : * int i_ = m_transpose ? j : i;
1398 : * int j_ = m_transpose ? i : j;
1399 : * for (size_t k = 0; k < m_triplet->nnz; k++) {
1400 : * if (i_ == (static_cast<int*> (m_triplet->i))[k]) {
1401 : * if (j_ == (static_cast<int*> (m_triplet->j))[k]) {
1402 : * val = (static_cast<double*> (m_triplet->x))[k];
1403 : * break;
1404 : * }
1405 : * }
1406 : * }
1407 : *
1408 : * We swap ::i and ::j to get the correct entry and this is why the output
1409 : * stream operator works as well (it uses #get(size_t, size_t)).
1410 : *
1411 : * ERGO: If we create m_sparse out of the non-transposed m_triplet, hell
1412 : * will break loose.
1413 : */
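/*
 * Concrete illustration of the note above (sketch): if A was filled as a 2x3
 * matrix and then transpose() was called, A.m_triplet still holds the (i,j)
 * pairs of the original 2x3 layout while m_transpose is true. Building
 * m_sparse straight from that triplet therefore yields the untransposed
 * matrix, which is why the _createSparse() calls below are followed by an
 * explicit cholmod_transpose whenever m_transpose is set.
 */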
1414 :
1415 3626 : if (C.m_sparse == NULL) {
1416 2125 : C._createSparse(); /* CHOLMOD_SPARSE: create it
1417 : from other representations */
1418 2125 : if (C.m_transpose) {
1419 0 : C.m_sparse = cholmod_transpose(C.m_sparse, 1, cholmod_handle());
1420 : }
1421 : }
1422 :
1423 3626 : if (A.m_sparse == NULL) {
1424 2403 : A._createSparse(); /* Likewise for the RHS */
1425 2403 : if (A.m_transpose) {
1426 300 : A.m_sparse = cholmod_transpose(A.m_sparse, 1, cholmod_handle());
1427 : }
1428 : }
1429 :
1430 : /*
1431 : * Don't store C as transpose any more - it will not have all values
1432 : * in the right order.
1433 : */
1434 3626 : if (C.m_transpose) {
1435 600 : C.m_transpose = false;
1436 : }
1437 :
1438 : C.m_sparse = cholmod_add(
1439 : C.m_sparse,
1440 : A.m_sparse,
1441 : __gamma_t,
1442 : __alpha_t,
1443 : true,
1444 : true,
1445 3626 : Matrix::cholmod_handle()); /* Use cholmod_add to compute the sum C := gamma * C + alpha * A */
1446 :
1447 : C.m_triplet = cholmod_sparse_to_triplet(
1448 : C.m_sparse,
1449 3626 : Matrix::cholmod_handle()); /* Update the triplet of the result (optional) */
1450 :
1451 :
1452 306 : } else if (type_of_A == MATRIX_DIAGONAL) { /* SPARSE + DIAGONAL */
1453 1 : C._createTriplet();
1454 1 : if (C.m_triplet != NULL) {
1455 121 : for (size_t i = 0; i < nrows; i++) {
1456 120 : C._addIJ(i, i, alpha * A.get(i, i), gamma);
1457 : }
1458 : }
1459 305 : } else if (type_of_A == MATRIX_LOWERTR) { /* SPARSE + LOWER TRI = SPARSE */
1460 1 : C._createTriplet();
1461 1 : if (C.m_triplet != NULL) {
1462 21 : for (size_t i = 0; i < nrows; i++) {
1463 460 : for (size_t j = (A.m_transpose ? i : 0);
1464 230 : j <= (A.m_transpose ? A.m_ncols - 1 : i); j++) {
1465 210 : C._addIJ(i, j, alpha * A.get(i, j), gamma);
1466 : }
1467 : }
1468 : }
1469 304 : } else if (type_of_A == MATRIX_DENSE) { /* SPARSE + DENSE */
1470 303 : C.m_type = MATRIX_DENSE; /* Sparse + Dense = Dense */
1471 303 : C.m_dataLength = A.getNcols() * A.getNrows(); /* Space to be allocated for the dense result */
1472 303 : C.m_data = new double[C.m_dataLength]; /* allocate space */
1473 3434 : for (size_t i = 0; i < C.getNrows(); i++) {
1474 44596 : for (size_t j = 0; j < C.getNcols(); j++) {
1475 41465 : C.set(i, j, gamma * A.get(i, j)); /* store the rhs on m_data (accounting for transpose) */
1476 : }
1477 : }
1478 : //_createTriplet();
1479 303 : if (C.m_triplet != NULL) {
1480 : /* Add triplets */
1481 25031 : for (size_t k = 0; k < C.m_triplet->nnz; k++) {
1482 24728 : int i = (static_cast<int*> (C.m_triplet->i))[k];
1483 24728 : int j = (static_cast<int*> (C.m_triplet->j))[k];
1484 24728 : if (C.m_transpose) {
1485 30 : std::swap(i, j);
1486 : }
1487 24728 : double v = (static_cast<double*> (C.m_triplet->x))[k];
1488 24728 : C._addIJ(i, j, v, alpha);
1489 : }
1490 : }
1491 303 : status = ForBESUtils::STATUS_HAD_TO_REALLOC;
1492 1 : } else if (type_of_A == MATRIX_SYMMETRIC) { /* SPARSE + SYMMETRIC (result is dense) */
1493 1 : C.m_dataLength = C.m_nrows * C.m_ncols;
1494 1 : C.m_data = new double[C.m_dataLength](); // reallocate memory
1495 1 : C.m_type = MATRIX_DENSE;
1496 121 : for (size_t i = 0; i < nrows; i++) {
1497 14520 : for (size_t j = 0; j < ncols; j++) {
1498 14400 : C.set(i, j, gamma * A.get(i, j)); // load symmetric data
1499 : }
1500 : }
1501 1 : C._createTriplet();
1502 61 : for (size_t k = 0; k < C.m_triplet->nnz; k++) {
1503 60 : C._addIJ((static_cast<int*> (C.m_triplet->i))[k],
1504 60 : (static_cast<int*> (C.m_triplet->j))[k],
1505 60 : (static_cast<double*> (C.m_triplet->x))[k],
1506 180 : alpha);
1507 : }
1508 1 : status = ForBESUtils::STATUS_HAD_TO_REALLOC;
1509 : }
1510 3932 : return status;
1511 : }
1512 :
1513 1 : int Matrix::generic_add_helper_left_diagonal(Matrix& C, double alpha, Matrix& A, double gamma) {
1514 1 : bool is_gamma_one = (std::abs(gamma - 1.0) < std::numeric_limits<double>::epsilon());
1515 1 : if (MATRIX_DIAGONAL == A.m_type) { /* Diagonal += Diagonal */
1516 1 : if (is_gamma_one) {
1517 1 : cblas_daxpy(A.length(), alpha, A.m_data, 1, C.m_data, 1);
1518 : } else {
1519 0 : for (size_t i = 0; i < A.length(); i++) {
1520 0 : C.m_data[i] = (gamma * C.m_data[i]) + (alpha * A.m_data[i]);
1521 : }
1522 : }
1523 : } else {
1524 0 : throw std::logic_error("Diagonal + Non-diagonal: not supported yet!");
1525 : }
1526 1 : return ForBESUtils::STATUS_OK;
1527 : }
1528 :
1529 2 : int Matrix::generic_add_helper_left_lower_tri(Matrix& C, double alpha, Matrix& A, double gamma) {
1530 2 : bool is_gamma_one = (std::abs(gamma - 1.0) < std::numeric_limits<double>::epsilon());
1531 2 : if (A.m_type == Matrix::MATRIX_LOWERTR) { /* LOWER + LOWER = LOWER */
1532 1 : if (is_gamma_one) {
1533 1 : cblas_daxpy(A.length(), alpha, A.m_data, 1, C.m_data, 1);
1534 : } else {
1535 0 : for (size_t i = 0; i < A.length(); i++) {
1536 0 : C.m_data[i] = (gamma * C.m_data[i]) + (alpha * A.m_data[i]);
1537 : }
1538 : }
1539 1 : } else if (A.m_type == Matrix::MATRIX_DIAGONAL) {
1540 121 : for (size_t i = 0; i < A.m_nrows; i++) {
1541 120 : C._addIJ(i, i, alpha * A.get(i, i), gamma);
1542 : }
1543 : } else {
1544 0 : throw std::logic_error("Lower-triangular + Non-lower triangular): not supported yet!");
1545 : }
1546 2 : return ForBESUtils::STATUS_OK;
1547 : }
1548 :
1549 118470 : int Matrix::mult(Matrix& C, double alpha, Matrix& A, Matrix& B, double gamma) {
1550 : // A and C must have compatible dimensions
1551 118470 : if (A.getNcols() != B.getNrows()) {
1552 1 : std::ostringstream oss;
1553 1 : oss << "A (" << A.getNrows() << "x" << A.getNcols()
1554 2 : << ") and B (" << B.getNrows() << "x" << B.getNcols()
1555 1 : << ") do not have compatible dimensions";
1556 1 : throw std::invalid_argument(oss.str().c_str());
1557 : }
1558 : /* C must have proper dimensions */
1559 118469 : if (C.getNrows() != A.getNrows() || C.getNcols() != B.getNcols()) {
1560 1 : std::ostringstream oss;
1561 1 : oss << "C is " << C.getNrows() << "x" << C.getNcols()
1562 2 : << ", but it should be " << A.getNrows() << "x"
1563 2 : << B.getNcols();
1564 1 : throw std::invalid_argument(oss.str().c_str());
1565 : }
1566 : // C := gamma * C + alpha * A * B
1567 118468 : int status = ForBESUtils::STATUS_UNDEFINED_FUNCTION;
1568 118468 : switch (A.getType()) {
1569                 :         case MATRIX_DENSE: /* DENSE * ? */
1570 116498 : status = multiply_helper_left_dense(C, alpha, A, B, gamma);
1571 116498 : break;
1572                 :         case MATRIX_SYMMETRIC: /* SYMMETRIC * ? */
1573 118 : status = multiply_helper_left_symmetric(C, alpha, A, B, gamma);
1574 118 : break;
1575                 :         case MATRIX_LOWERTR: /* LOWER TRIANGULAR * ? */
1576                 :             /* not implemented yet; status remains STATUS_UNDEFINED_FUNCTION */
1577 0 : break;
1578                 :         case MATRIX_DIAGONAL: /* DIAGONAL * ? */
1579 52 : status = multiply_helper_left_diagonal(C, alpha, A, B, gamma);
1580 52 : break;
1581                 :         case MATRIX_SPARSE: /* SPARSE * ? */
1582 1800 : status = multiply_helper_left_sparse(C, alpha, A, B, gamma);
1583 1800 : break;
1584 : default:
1585 0 : break;
1586 : }
1587 118468 : return status;
1588 : }
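                 :
                 : /*
                 :  * Usage sketch for Matrix::mult above (illustrative; dimensions are
                 :  * validated before dispatch, so incompatible operands throw
                 :  * std::invalid_argument). It computes C := gamma*C + alpha*A*B:
                 :  *
                 :  *   double a_data[6] = {1, 2, 3, 4, 5, 6};   // 2x3, column-major
                 :  *   double b_data[3] = {1, 1, 1};            // 3x1
                 :  *   Matrix A(2, 3, a_data);
                 :  *   Matrix B(3, 1, b_data);
                 :  *   Matrix C(2, 1);                          // 2x1 result
                 :  *   Matrix::mult(C, 1.0, A, B, 0.0);         // C = A*B = (9, 12)^T
                 :  */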
1589 :
1590 116498 : int Matrix::multiply_helper_left_dense(Matrix& C, double alpha, Matrix& A, Matrix& B, double gamma) {
1591 : /* A is dense */
1592 116498 : int status = ForBESUtils::STATUS_OK;
1593 116498 : if (C.m_dataLength < C.getNrows() * C.getNcols()) {
1594 0 : C = Matrix(C.getNrows(), C.getNcols(), Matrix::MATRIX_DENSE);
1595 0 : status = ForBESUtils::STATUS_HAD_TO_REALLOC;
1596 : }
1597 116498 : C.m_type = Matrix::MATRIX_DENSE;
1598 116498 : if (MATRIX_DENSE == B.m_type) { // B is also dense
1599 : cblas_dgemm(CblasColMajor,
1600 : A.m_transpose ? CblasTrans : CblasNoTrans,
1601 : B.m_transpose ? CblasTrans : CblasNoTrans,
1602 : A.m_nrows,
1603 : B.m_ncols,
1604 : A.m_ncols,
1605 : alpha,
1606 : A.m_data,
1607 : A.m_transpose ? A.m_ncols : A.m_nrows,
1608 : B.m_data,
1609 : B.m_transpose ? B.m_ncols : B.m_nrows,
1610 : gamma,
1611 : C.m_data,
1612 116098 : C.m_nrows);
1613 116098 : status = std::max(status, ForBESUtils::STATUS_OK);
1614 400 : } else if (MATRIX_DIAGONAL == B.m_type) { // {DENSE} * {DIAGONAL} = {DENSE} - B is diagonal
1615 2100 : for (size_t j = 0; j < C.getNcols(); j++) {
1616 16200 : for (size_t i = 0; i < C.getNrows(); i++) {
1617 14400 : C._addIJ(i, j, alpha * A.get(i, j) * B.get(j, j), gamma);
1618 : }
1619 : }
1620 300 : status = ForBESUtils::STATUS_OK;
1621 100 : } else if (MATRIX_SYMMETRIC == B.m_type || MATRIX_LOWERTR == B.m_type) {
1622 100 : domm(C, alpha, A, B, gamma);
1623 100 : status = ForBESUtils::STATUS_OK;
1624                 :     } else { /* {DENSE} * {SPARSE}: not implemented here; only the draft below sketches it */
1625 : /*
1626 : * Trick: For D: dense, S: sparse it is
1627 : * D * S = (S' * D')'
1628 : */
1629 : // Matrix tempSparse(right);
1630 : // (const_cast<Matrix&> (*this)).transpose(); /* D' */
1631 : // tempSparse.transpose(); /* S' */
1632 : // Matrix r = tempSparse * (const_cast<Matrix&> (*this)); /* r = S' * D' */
1633 : // r.transpose(); /* r' */
1634 : // (const_cast<Matrix&> (*this)).transpose(); /* D */
1635 : // return r;
1636 : }
1637 116498 : return status;
1638 : }
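                 :
                 : /*
                 :  * Note on the unimplemented {DENSE} * {SPARSE} branch above: as the
                 :  * commented-out draft suggests, for a dense D and a sparse S one can use
                 :  * D * S = (S^T * D^T)^T and route the product through the sparse-left
                 :  * path. A hedged sketch (assuming transpose() and operator* as used in
                 :  * the draft):
                 :  *
                 :  *   // given a dense Matrix D and a sparse Matrix S, already filled
                 :  *   D.transpose();          // D^T (flips the transpose flag)
                 :  *   S.transpose();          // S^T
                 :  *   Matrix R = S * D;       // R = S^T * D^T, handled by the sparse-left helper
                 :  *   R.transpose();          // R = (S^T * D^T)^T = D * S
                 :  *   D.transpose();          // restore D
                 :  *   S.transpose();          // restore S
                 :  */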
1639 :
1640 1800 : int Matrix::multiply_helper_left_sparse(Matrix& C, double alpha, Matrix& A, Matrix& B, double gamma) {
1641 1800 : bool is_alpha_one = (std::abs(alpha - 1.0) < std::numeric_limits<double>::epsilon());
1642 1800 : bool is_gamma_zero = (std::abs(gamma) < std::numeric_limits<double>::epsilon());
1643                 :     //    bool is_gamma_one = (std::abs(gamma - 1.0) < std::numeric_limits<double>::epsilon());
1644 1800 : int status = ForBESUtils::STATUS_UNDEFINED_FUNCTION;
1645 1800 : if (B.m_type == MATRIX_SPARSE) {
1646 : // RHS is sparse
1647 1200 : if (A.m_sparse == NULL) {
1648 300 : A._createSparse();
1649 : }
1650 1200 : if (B.m_sparse == NULL) {
1651 0 : B._createSparse();
1652 : }
1653 1200 : if (C.m_sparse == NULL) {
1654 1200 : C._createSparse();
1655 : }
1656 : cholmod_sparse *r; // r will store A * B
1657 : r = cholmod_ssmult(
1658 1200 : (A.isColumnVector() && B.isColumnVector())
1659 0 : ? cholmod_transpose(A.m_sparse, 1, Matrix::cholmod_handle())
1660 : : A.m_sparse,
1661 : B.m_sparse,
1662 : 0,
1663 : true,
1664 : false,
1665 2400 : Matrix::cholmod_handle()); // r = A*B
1666 :
1667 : /*
1668 : * SCALE: r *= alpha (unless alpha == 1)
1669 : */
1670 1200 : if (!is_alpha_one) {
1671 12000 : for (size_t j = 0; j < C.m_ncols; j++) {
1672 10800 : int p = (static_cast<int*> (r->p))[j];
1673 10800 : int pend = (r->packed == 1)
1674 10800 : ? ((static_cast<int*> (r->p))[j + 1])
1675 21600 : : p + (static_cast<int*> (r->nz))[j];
1676 108815 : for (; p < pend; p++) {
1677 98015 : (static_cast<double*> (r->x))[p] *= alpha;
1678 : }
1679 : }
1680 : }
1681 :
1682 :
1683 1200 : if (is_gamma_zero) {
1684 300 : cholmod_triplet * r_to_triplet = cholmod_sparse_to_triplet(r, Matrix::cholmod_handle());
1685 : // C := alpha * A * B = r
1686 300 : C.m_sparse = r;
1687 300 : C.m_triplet = r_to_triplet;
1688 300 : status = ForBESUtils::STATUS_OK;
1689 : } else {
1690 900 : Matrix temp_r = Matrix(true);
1691 900 : temp_r.m_nrows = C.getNrows();
1692 900 : temp_r.m_ncols = C.getNcols();
1693 900 : temp_r.m_sparse = cholmod_copy_sparse(r, cholmod_handle());
1694 900 : temp_r.m_type = MATRIX_SPARSE;
1695 900 : temp_r.m_triplet = cholmod_sparse_to_triplet(temp_r.m_sparse, Matrix::cholmod_handle());
1696 900 : add(C, 1.0, temp_r, gamma);
1697 900 : status = ForBESUtils::STATUS_OK;
1698 : }
1699 600 : } else if (B.m_type == MATRIX_DENSE) { /* C = gamma * C + alpha * SPARSE * DENSE */
1700                 :
1701                 :         // RHS is dense: this branch is currently a stub; the commented-out draft
1702                 :         // below has not been enabled, so status remains STATUS_UNDEFINED_FUNCTION.
1703                 :         //        Matrix result(A.getNrows(), B.getNcols());
1704 :
1705 : // if (A.m_triplet != NULL && A.m_sparse == NULL)
1706 : // A._createSparse();
1707 : //
1708 : // double alpha[2] = {1.0, 0.0};
1709 : // double beta[2] = {0.0, 0.0};
1710 : //
1711 : // if (B.m_dense == NULL) { /* Prepare right.m_dense */
1712 : // B.m_dense = cholmod_allocate_dense(
1713 : // B.getNrows(),
1714 : // B.getNcols(),
1715 : // B.getNrows(),
1716 : // CHOLMOD_REAL,
1717 : // Matrix::cholmod_handle());
1718 : //
1719 : // if (!B.m_transpose) {
1720 : // B.m_dense->x = B.m_data;
1721 : // } else {
1722 : // /* Store RHS as transpose into right.m_dense */
1723 : // for (size_t i = 0; i < B.getNrows(); i++) { /* SPARSE x DENSE' */
1724 : // for (size_t j = 0; j < B.getNcols(); j++) {
1725 : // (static_cast<double*> (B.m_dense->x))[i + j * B.getNrows()] = B.get(i, j);
1726 : // }
1727 : // }
1728 : // }
1729 : // }
1730 : //
1731 : // bool dotProd = isColumnVector() && B.isColumnVector();
1732 : // C.m_dense = cholmod_allocate_dense(
1733 : // dotProd ? 1 : B.getNrows(),
1734 : // dotProd ? 1 : B.getNcols(),
1735 : // dotProd ? 1 : B.getNrows(),
1736 : // CHOLMOD_REAL,
1737 : // Matrix::cholmod_handle());
1738 :
1739 : // if (m_transpose) {
1740 : // this->transpose();
1741 : // }
1742 : // cholmod_sdmult(
1743 : // m_sparse,
1744 : // dotProd ? true : m_transpose,
1745 : // alpha,
1746 : // beta,
1747 : // right.m_dense,
1748 : // result.m_dense,
1749 : // Matrix::cholmod_handle()
1750 : // );
1751 : // if (m_transpose) {
1752 : // this->transpose();
1753 : // }
1754 : // for (size_t k = 0; k < result.length(); k++) {
1755 : // result.m_data[k] = (static_cast<double*> (result.m_dense->x))[k];
1756 : // }
1757 :
1758 :
1759 600 : } else if (B.m_type == MATRIX_DIAGONAL) { // += alpha * SPARSE * DIAGONAL
1760          600 :         Matrix A_temp(A); // A_temp will become alpha * A * B (column-wise triplet scaling)
1761 24600 : for (size_t k = 0; k < A.m_triplet->nnz; k++) {
1762 24000 : int j_ = (static_cast<int*> (A_temp.m_triplet->j))[k];
1763 24000 : static_cast<double*> (A_temp.m_triplet->x)[k] *= (alpha * B.get(j_, j_));
1764 : }
1765 600 : status = add(C, 1.0, A_temp, gamma);
1766 : } else {
1767 : //LCOV_EXCL_START
1768                 :         throw std::invalid_argument("SPARSE * {SYMMETRIC/LOWER/UPPER TRIANGULAR}: not supported");
1769 : //LCOV_EXCL_STOP
1770 : }
1771 1800 : return status;
1772 : }
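                 :
                 : /*
                 :  * Worked note for the SPARSE * DIAGONAL branch above: with B = diag(d_j),
                 :  * (A*B)(i,j) = A(i,j) * d_j, so it suffices to scale each triplet entry of
                 :  * a copy of A by alpha * d_j (j being the entry's column) and fold the
                 :  * result into C via add(C, 1.0, A_temp, gamma). Illustrative call through
                 :  * the dispatcher (assuming a sparse A and a diagonal B of matching size):
                 :  *
                 :  *   Matrix C(A.getNrows(), B.getNcols());
                 :  *   Matrix::mult(C, 2.0, A, B, 0.0);   // C := 2 * A * B
                 :  */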
1773 :
1774 52 : int Matrix::multiply_helper_left_diagonal(Matrix& C, double alpha, Matrix& A, Matrix& B, double gamma) {
1775 36671 : for (size_t i = 0; i < C.m_nrows; i++) {
1776 36619 : if (MATRIX_SYMMETRIC == B.m_type) {
1777 0 : for (size_t j = i; j < B.m_ncols; j++) {
1778 0 : C._addIJ(i, j, alpha * A.m_data[i] * B.get(i, j), gamma);
1779 : }
1780 36619 : } else if (MATRIX_DENSE == B.m_type) {
1781 73238 : for (size_t j = 0; j < B.m_ncols; j++) {
1782 36619 : C._addIJ(i, j, alpha * A.m_data[i] * B.get(i, j), gamma);
1783 : }
1784 0 : } else if (MATRIX_DIAGONAL == B.m_type) {
1785 0 : C._addIJ(i, i, alpha * A.m_data[i] * B.m_data[i], gamma);
1786 0 : } else if (MATRIX_LOWERTR == B.m_type) {
1787 0 : for (size_t j = 0; j <= i; j++) {
1788 0 : C._addIJ(i, j, alpha * A.m_data[i] * B.get(i, j), gamma);
1789 : }
1790 : }
1791 : }
1792 52 : return ForBESUtils::STATUS_OK;
1793 : }
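                 :
                 : /*
                 :  * Worked note for the diagonal-LHS helper above: with A = diag(d_0,...,d_{n-1}),
                 :  * (A*B)(i,j) = d_i * B(i,j), i.e. row i of B is scaled by d_i. The loops only
                 :  * visit the entries that can be non-zero for B's storage type. Illustrative use:
                 :  *
                 :  *   Matrix D(2, 2, Matrix::MATRIX_DIAGONAL);
                 :  *   D.set(0, 0, 2.0);
                 :  *   D.set(1, 1, 3.0);
                 :  *   Matrix B(2, 2);                    // dense, filled elsewhere
                 :  *   Matrix C(2, 2);
                 :  *   Matrix::mult(C, 1.0, D, B, 0.0);   // row 0 of B doubled, row 1 tripled
                 :  */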
1794 :
1795 118 : int Matrix::multiply_helper_left_symmetric(Matrix& C, double alpha, Matrix& A, Matrix& B, double gamma) {
1796 : // multiply when the LHS is symmetric
1797 118 : if (B.isColumnVector()) {
1798 : cblas_dspmv(CblasColMajor,
1799 : CblasLower,
1800 : A.m_nrows,
1801 : alpha,
1802 : A.m_data,
1803 : B.m_data,
1804 : 1,
1805 : gamma,
1806 : C.m_data,
1807 118 : 1);
1808 : } else {
1809 0 : domm(C, alpha, A, B, gamma);
1810 : }
1811 118 : return ForBESUtils::STATUS_OK;
1812 120 : }
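                 :
                 : /*
                 :  * Usage sketch for the symmetric-LHS helper above (illustrative): when B is
                 :  * a column vector, cblas_dspmv computes C := gamma*C + alpha*A*B directly on
                 :  * A's packed lower-triangular symmetric storage; otherwise domm() is used.
                 :  *
                 :  *   Matrix S(3, 3, Matrix::MATRIX_SYMMETRIC);   // packed symmetric storage
                 :  *   // ... fill S with set(i, j, v) ...
                 :  *   Matrix x(3, 1);
                 :  *   Matrix y(3, 1);
                 :  *   Matrix::mult(y, 1.0, S, x, 0.0);            // y := S*x via dspmv
                 :  */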