linalg.h

Go to the documentation of this file.
1 /*************************************************************************
2 ALGLIB 3.11.0 (source code generated 2017-05-11)
3 Copyright (c) Sergey Bochkanov (ALGLIB project).
4 
5 >>> SOURCE LICENSE >>>
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation (www.fsf.org); either version 2 of the
9 License, or (at your option) any later version.
10 
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15 
16 A copy of the GNU General Public License is available at
17 http://www.fsf.org/licensing/licenses
18 >>> END OF LICENSE >>>
19 *************************************************************************/
20 #ifndef _linalg_pkg_h
21 #define _linalg_pkg_h
22 #include "ap.h"
23 #include "alglibinternal.h"
24 #include "alglibmisc.h"
25 
27 //
28 // THIS SECTION CONTAINS COMPUTATIONAL CORE DECLARATIONS (DATATYPES)
29 //
31 namespace alglib_impl
32 {
33 /* Computational-core representation of a sparse matrix (Hash-Table, CRS or
   SKS storage -- see the format notes later in this file). Per the docs
   below, never access these fields directly; use ALGLIB functions. */
33 typedef struct
34 {
35  ae_vector vals; /* stored element values; exact layout depends on storage format -- TODO confirm */
36  ae_vector idx; /* element indices (presumably column indices for CRS, slots for Hash-Table) -- verify */
37  ae_vector ridx; /* presumably per-row offsets into vals/idx (CRS) -- verify against implementation */
38  ae_vector didx; /* NOTE(review): purpose not evident from this header -- verify against implementation */
39  ae_vector uidx; /* NOTE(review): purpose not evident from this header -- verify against implementation */
40  ae_int_t matrixtype; /* storage-format selector; formats documented below are Hash-Table, CRS, SKS -- TODO confirm encoding */
41  ae_int_t m; /* number of rows (matrix is M*N per the function docs below) */
42  ae_int_t n; /* number of columns */
43  ae_int_t nfree; /* presumably free slots remaining (Hash-Table format) -- verify */
44  ae_int_t ninitialized; /* presumably count of elements initialized so far -- verify */
45  ae_int_t tablesize; /* presumably hash-table capacity (Hash-Table format) -- verify */
46 } sparsematrix;
47 typedef struct
48 {
49  ae_vector d;
50  ae_vector u;
53 /* Matrix inverse report (see the matinvreport class docs below):
   condition-number estimates returned by inversion routines. */
53 typedef struct
54 {
55  double r1; /* reciprocal of condition number in 1-norm */
56  double rinf; /* reciprocal of condition number in inf-norm */
57 } matinvreport;
58 typedef struct
59 {
60  double e1;
61  double e2;
62  ae_vector x;
63  ae_vector ax;
64  double xax;
66  ae_vector rk;
67  ae_vector rk1;
69  ae_vector xk1;
70  ae_vector pk;
71  ae_vector pk1;
73  rcommstate rstate;
74  ae_vector tmp2;
76 typedef struct
77 {
80  ae_int_t nstart;
81  ae_int_t nits;
82  ae_int_t seedval;
86  ae_vector xbest;
90  ae_vector mtv;
91  ae_bool needmv;
92  ae_bool needmtv;
93  double repnorm;
94  rcommstate rstate;
96 typedef struct
97 {
100  ae_int_t nwork;
101  ae_int_t maxits;
102  double eps;
103  ae_int_t eigenvectorsneeded;
104  ae_int_t matrixtype;
106  ae_bool running;
108  ae_matrix qcur;
109  ae_matrix znew;
113  ae_matrix rq;
114  ae_matrix dummy;
115  ae_vector rw;
117  ae_vector wcur;
118  ae_vector wprev;
119  ae_vector wrank;
123  ae_int_t requesttype;
124  ae_int_t requestsize;
125  ae_int_t repiterationscount;
126  rcommstate rstate;
128 typedef struct
129 {
130  ae_int_t iterationscount;
132 
133 }
134 
136 //
137 // THIS SECTION CONTAINS C++ INTERFACE
138 //
140 namespace alglib
141 {
142 
143 /*************************************************************************
144 Sparse matrix structure.
145 
146 You should use ALGLIB functions to work with sparse matrix. Never try to
147 access its fields directly!
148 
149 NOTES ON THE SPARSE STORAGE FORMATS
150 
151 Sparse matrices can be stored using several formats:
152 * Hash-Table representation
153 * Compressed Row Storage (CRS)
154 * Skyline matrix storage (SKS)
155 
156 Each of the formats has benefits and drawbacks:
157 * Hash-table is good for dynamic operations (insertion of new elements),
158  but does not support linear algebra operations
159 * CRS is good for operations like matrix-vector or matrix-matrix products,
160  but its initialization is less convenient - you have to tell row sizes
161  at the initialization, and you have to fill matrix only row by row,
162  from left to right.
163 * SKS is a special format which is used to store triangular factors from
164  Cholesky factorization. It does not support dynamic modification, and
165  support for linear algebra operations is very limited.
166 
167 Tables below outline information about these formats:
168 
169  OPERATIONS WITH MATRIX HASH CRS SKS
170  creation + + +
171  SparseGet + + +
172  SparseRewriteExisting + + +
173  SparseSet +
174  SparseAdd +
175  SparseGetRow + +
176  SparseGetCompressedRow + +
177  sparse-dense linear algebra + +
178 *************************************************************************/
180 {
181 public:
185  virtual ~_sparsematrix_owner();
188 protected:
190 };
192 {
193 public:
197  virtual ~sparsematrix();
198 
199 };
200 
201 
202 /*************************************************************************
203 Temporary buffers for sparse matrix operations.
204 
205 You should pass an instance of this structure to factorization functions.
206 It allows to reuse memory during repeated sparse factorizations. You do
207 not have to call some initialization function - simply passing an instance
208 to factorization function is enough.
209 *************************************************************************/
211 {
212 public:
216  virtual ~_sparsebuffers_owner();
219 protected:
221 };
223 {
224 public:
228  virtual ~sparsebuffers();
229 
230 };
231 
232 
233 
234 
235 
236 
237 
238 
239 
240 /*************************************************************************
241 Matrix inverse report:
242 * R1 reciprocal of condition number in 1-norm
243 * RInf reciprocal of condition number in inf-norm
244 *************************************************************************/
246 {
247 public:
251  virtual ~_matinvreport_owner();
254 protected:
256 };
258 {
259 public:
263  virtual ~matinvreport();
264  double &r1;
265  double &rinf;
266 
267 };
268 
269 
270 
271 
272 
273 
274 
275 
276 
277 /*************************************************************************
278 This object stores state of the iterative norm estimation algorithm.
279 
280 You should use ALGLIB functions to work with this object.
281 *************************************************************************/
283 {
284 public:
288  virtual ~_normestimatorstate_owner();
291 protected:
293 };
295 {
296 public:
301 
302 };
303 
304 /*************************************************************************
305 This object stores state of the subspace iteration algorithm.
306 
307 You should use ALGLIB functions to work with this object.
308 *************************************************************************/
310 {
311 public:
315  virtual ~_eigsubspacestate_owner();
318 protected:
320 };
322 {
323 public:
327  virtual ~eigsubspacestate();
328 
329 };
330 
331 
332 /*************************************************************************
333 This object stores state of the subspace iteration algorithm.
334 
335 You should use ALGLIB functions to work with this object.
336 *************************************************************************/
338 {
339 public:
343  virtual ~_eigsubspacereport_owner();
346 protected:
348 };
350 {
351 public:
357 
358 };
359 
360 /*************************************************************************
361 This function creates sparse matrix in a Hash-Table format.
362 
363 This function creates Hash-Table matrix, which can be converted to CRS
364 format after its initialization is over. Typical usage scenario for a
365 sparse matrix is:
366 1. creation in a Hash-Table format
367 2. insertion of the matrix elements
368 3. conversion to the CRS representation
369 4. matrix is passed to some linear algebra algorithm
370 
371 Some information about different matrix formats can be found below, in
372 the "NOTES" section.
373 
374 INPUT PARAMETERS
375  M - number of rows in a matrix, M>=1
376  N - number of columns in a matrix, N>=1
377  K - K>=0, expected number of non-zero elements in a matrix.
378  K can be inexact approximation, can be less than actual
379  number of elements (table will grow when needed) or
380  even zero.
381  It is important to understand that although hash-table
382  may grow automatically, it is better to provide good
383  estimate of data size.
384 
385 OUTPUT PARAMETERS
386  S - sparse M*N matrix in Hash-Table representation.
387  All elements of the matrix are zero.
388 
389 NOTE 1
390 
391 Hash-tables use memory inefficiently, and they have to keep some amount
392 of the "spare memory" in order to have good performance. Hash table for
393 matrix with K non-zero elements will need C*K*(8+2*sizeof(int)) bytes,
394 where C is a small constant, about 1.5-2 in magnitude.
395 
396 CRS storage, from the other side, is more memory-efficient, and needs
397 just K*(8+sizeof(int))+M*sizeof(int) bytes, where M is a number of rows
398 in a matrix.
399 
400 When you convert from the Hash-Table to CRS representation, all unneeded
401 memory will be freed.
402 
403 NOTE 2
404 
405 Comments of SparseMatrix structure outline information about different
406 sparse storage formats. We recommend you to read them before starting to
407 use ALGLIB sparse matrices.
408 
409 NOTE 3
410 
411 This function completely overwrites S with new sparse matrix. Previously
412 allocated storage is NOT reused. If you want to reuse already allocated
413 memory, call SparseCreateBuf function.
414 
415  -- ALGLIB PROJECT --
416  Copyright 14.10.2011 by Bochkanov Sergey
417 *************************************************************************/
418 void sparsecreate(const ae_int_t m, const ae_int_t n, const ae_int_t k, sparsematrix &s);
419 void sparsecreate(const ae_int_t m, const ae_int_t n, sparsematrix &s);
420 
421 
422 /*************************************************************************
423 This version of SparseCreate function creates sparse matrix in Hash-Table
424 format, reusing previously allocated storage as much as possible. Read
425 comments for SparseCreate() for more information.
426 
427 INPUT PARAMETERS
428  M - number of rows in a matrix, M>=1
429  N - number of columns in a matrix, N>=1
430  K - K>=0, expected number of non-zero elements in a matrix.
431  K can be inexact approximation, can be less than actual
432  number of elements (table will grow when needed) or
433  even zero.
434  It is important to understand that although hash-table
435  may grow automatically, it is better to provide good
436  estimate of data size.
437  S - SparseMatrix structure which MAY contain some already
438  allocated storage.
439 
440 OUTPUT PARAMETERS
441  S - sparse M*N matrix in Hash-Table representation.
442  All elements of the matrix are zero.
443  Previously allocated storage is reused, if its size
444  is compatible with expected number of non-zeros K.
445 
446  -- ALGLIB PROJECT --
447  Copyright 14.01.2014 by Bochkanov Sergey
448 *************************************************************************/
449 void sparsecreatebuf(const ae_int_t m, const ae_int_t n, const ae_int_t k, const sparsematrix &s);
450 void sparsecreatebuf(const ae_int_t m, const ae_int_t n, const sparsematrix &s);
451 
452 
453 /*************************************************************************
454 This function creates sparse matrix in a CRS format (expert function for
455 situations when you are running out of memory).
456 
457 This function creates CRS matrix. Typical usage scenario for a CRS matrix
458 is:
459 1. creation (you have to tell number of non-zero elements at each row at
460  this moment)
461 2. insertion of the matrix elements (row by row, from left to right)
462 3. matrix is passed to some linear algebra algorithm
463 
464 This function is a memory-efficient alternative to SparseCreate(), but it
465 is more complex because it requires you to know in advance how large your
466 matrix is. Some information about different matrix formats can be found
467 in comments on SparseMatrix structure. We recommend you to read them
468 before starting to use ALGLIB sparse matrices.
469 
470 INPUT PARAMETERS
471  M - number of rows in a matrix, M>=1
472  N - number of columns in a matrix, N>=1
473  NER - number of elements at each row, array[M], NER[I]>=0
474 
475 OUTPUT PARAMETERS
476  S - sparse M*N matrix in CRS representation.
477  You have to fill ALL non-zero elements by calling
478  SparseSet() BEFORE you try to use this matrix.
479 
480 NOTE: this function completely overwrites S with new sparse matrix.
481  Previously allocated storage is NOT reused. If you want to reuse
482  already allocated memory, call SparseCreateCRSBuf function.
483 
484  -- ALGLIB PROJECT --
485  Copyright 14.10.2011 by Bochkanov Sergey
486 *************************************************************************/
487 void sparsecreatecrs(const ae_int_t m, const ae_int_t n, const integer_1d_array &ner, sparsematrix &s);
488 
489 
490 /*************************************************************************
491 This function creates sparse matrix in a CRS format (expert function for
492 situations when you are running out of memory). This version of CRS
493 matrix creation function may reuse memory already allocated in S.
494 
495 This function creates CRS matrix. Typical usage scenario for a CRS matrix
496 is:
497 1. creation (you have to tell number of non-zero elements at each row at
498  this moment)
499 2. insertion of the matrix elements (row by row, from left to right)
500 3. matrix is passed to some linear algebra algorithm
501 
502 This function is a memory-efficient alternative to SparseCreate(), but it
503 is more complex because it requires you to know in advance how large your
504 matrix is. Some information about different matrix formats can be found
505 in comments on SparseMatrix structure. We recommend you to read them
506 before starting to use ALGLIB sparse matrices.
507 
508 INPUT PARAMETERS
509  M - number of rows in a matrix, M>=1
510  N - number of columns in a matrix, N>=1
511  NER - number of elements at each row, array[M], NER[I]>=0
512  S - sparse matrix structure with possibly preallocated
513  memory.
514 
515 OUTPUT PARAMETERS
516  S - sparse M*N matrix in CRS representation.
517  You have to fill ALL non-zero elements by calling
518  SparseSet() BEFORE you try to use this matrix.
519 
520  -- ALGLIB PROJECT --
521  Copyright 14.10.2011 by Bochkanov Sergey
522 *************************************************************************/
523 void sparsecreatecrsbuf(const ae_int_t m, const ae_int_t n, const integer_1d_array &ner, const sparsematrix &s);
524 
525 
526 /*************************************************************************
527 This function creates sparse matrix in a SKS format (skyline storage
528 format). In most cases you do not need this function - CRS format better
529 suits most use cases.
530 
531 INPUT PARAMETERS
532  M, N - number of rows(M) and columns (N) in a matrix:
533  * M=N (as for now, ALGLIB supports only square SKS)
534  * N>=1
535  * M>=1
536  D - "bottom" bandwidths, array[M], D[I]>=0.
537  I-th element stores number of non-zeros at I-th row,
538  below the diagonal (diagonal itself is not included)
539  U - "top" bandwidths, array[N], U[I]>=0.
540  I-th element stores number of non-zeros at I-th row,
541  above the diagonal (diagonal itself is not included)
542 
543 OUTPUT PARAMETERS
544  S - sparse M*N matrix in SKS representation.
545  All elements are filled by zeros.
546  You may use SparseRewriteExisting() to change their
547  values.
548 
549 NOTE: this function completely overwrites S with new sparse matrix.
550  Previously allocated storage is NOT reused. If you want to reuse
551  already allocated memory, call SparseCreateSKSBuf function.
552 
553  -- ALGLIB PROJECT --
554  Copyright 13.01.2014 by Bochkanov Sergey
555 *************************************************************************/
556 void sparsecreatesks(const ae_int_t m, const ae_int_t n, const integer_1d_array &d, const integer_1d_array &u, sparsematrix &s);
557 
558 
559 /*************************************************************************
560 This is "buffered" version of SparseCreateSKS() which reuses memory
561 previously allocated in S (of course, memory is reallocated if needed).
562 
563 This function creates sparse matrix in a SKS format (skyline storage
564 format). In most cases you do not need this function - CRS format better
565 suits most use cases.
566 
567 INPUT PARAMETERS
568  M, N - number of rows(M) and columns (N) in a matrix:
569  * M=N (as for now, ALGLIB supports only square SKS)
570  * N>=1
571  * M>=1
572  D - "bottom" bandwidths, array[M], 0<=D[I]<=I.
573  I-th element stores number of non-zeros at I-th row,
574  below the diagonal (diagonal itself is not included)
575  U - "top" bandwidths, array[N], 0<=U[I]<=I.
576  I-th element stores number of non-zeros at I-th row,
577  above the diagonal (diagonal itself is not included)
578 
579 OUTPUT PARAMETERS
580  S - sparse M*N matrix in SKS representation.
581  All elements are filled by zeros.
582  You may use SparseSet()/SparseAdd() to change their
583  values.
584 
585  -- ALGLIB PROJECT --
586  Copyright 13.01.2014 by Bochkanov Sergey
587 *************************************************************************/
588 void sparsecreatesksbuf(const ae_int_t m, const ae_int_t n, const integer_1d_array &d, const integer_1d_array &u, const sparsematrix &s);
589 
590 
591 /*************************************************************************
592 This function copies S0 to S1.
593 This function completely deallocates memory owned by S1 before creating a
594 copy of S0. If you want to reuse memory, use SparseCopyBuf.
595 
596 NOTE: this function does not verify its arguments, it just copies all
597 fields of the structure.
598 
599  -- ALGLIB PROJECT --
600  Copyright 14.10.2011 by Bochkanov Sergey
601 *************************************************************************/
602 void sparsecopy(const sparsematrix &s0, sparsematrix &s1);
603 
604 
605 /*************************************************************************
606 This function copies S0 to S1.
607 Memory already allocated in S1 is reused as much as possible.
608 
609 NOTE: this function does not verify its arguments, it just copies all
610 fields of the structure.
611 
612  -- ALGLIB PROJECT --
613  Copyright 14.10.2011 by Bochkanov Sergey
614 *************************************************************************/
615 void sparsecopybuf(const sparsematrix &s0, const sparsematrix &s1);
616 
617 
618 /*************************************************************************
619 This function efficiently swaps contents of S0 and S1.
620 
621  -- ALGLIB PROJECT --
622  Copyright 16.01.2014 by Bochkanov Sergey
623 *************************************************************************/
624 void sparseswap(const sparsematrix &s0, const sparsematrix &s1);
625 
626 
627 /*************************************************************************
628 This function adds value to S[i,j] - element of the sparse matrix. Matrix
629 must be in a Hash-Table mode.
630 
631 In case S[i,j] already exists in the table, V is added to its value. In
632 case S[i,j] is non-existent, it is inserted in the table. Table
633 automatically grows when necessary.
634 
635 INPUT PARAMETERS
636  S - sparse M*N matrix in Hash-Table representation.
637  Exception will be thrown for CRS matrix.
638  I - row index of the element to modify, 0<=I<M
639  J - column index of the element to modify, 0<=J<N
640  V - value to add, must be finite number
641 
642 OUTPUT PARAMETERS
643  S - modified matrix
644 
645 NOTE 1: when S[i,j] is exactly zero after modification, it is deleted
646 from the table.
647 
648  -- ALGLIB PROJECT --
649  Copyright 14.10.2011 by Bochkanov Sergey
650 *************************************************************************/
651 void sparseadd(const sparsematrix &s, const ae_int_t i, const ae_int_t j, const double v);
652 
653 
654 /*************************************************************************
655 This function modifies S[i,j] - element of the sparse matrix.
656 
657 For Hash-based storage format:
658 * this function can be called at any moment - during matrix initialization
659  or later
660 * new value can be zero or non-zero. In case new value of S[i,j] is zero,
661  this element is deleted from the table.
662 * this function has no effect when called with zero V for non-existent
663  element.
664 
665 For CRS-based storage format:
666 * this function can be called ONLY DURING MATRIX INITIALIZATION
667 * new value MUST be non-zero. Exception will be thrown for zero V.
668 * elements must be initialized in correct order - from top row to bottom,
669  within row - from left to right.
670 
671 For SKS storage: NOT SUPPORTED! Use SparseRewriteExisting() to work with
672 SKS matrices.
673 
674 INPUT PARAMETERS
675  S - sparse M*N matrix in Hash-Table or CRS representation.
676  I - row index of the element to modify, 0<=I<M
677  J - column index of the element to modify, 0<=J<N
678  V - value to set, must be finite number, can be zero
679 
680 OUTPUT PARAMETERS
681  S - modified matrix
682 
683  -- ALGLIB PROJECT --
684  Copyright 14.10.2011 by Bochkanov Sergey
685 *************************************************************************/
686 void sparseset(const sparsematrix &s, const ae_int_t i, const ae_int_t j, const double v);
687 
688 
689 /*************************************************************************
690 This function returns S[i,j] - element of the sparse matrix. Matrix can
691 be in any mode (Hash-Table, CRS, SKS), but this function is less efficient
692 for CRS matrices. Hash-Table and SKS matrices can find element in O(1)
693 time, while CRS matrices need O(log(RS)) time, where RS is the number of
694 non-zero elements in a row.
695 
696 INPUT PARAMETERS
697  S - sparse M*N matrix in any representation
698  (Hash-Table, CRS or SKS).
699  I - row index of the element to modify, 0<=I<M
700  J - column index of the element to modify, 0<=J<N
701 
702 RESULT
703  value of S[I,J] or zero (in case no element with such index is found)
704 
705  -- ALGLIB PROJECT --
706  Copyright 14.10.2011 by Bochkanov Sergey
707 *************************************************************************/
708 double sparseget(const sparsematrix &s, const ae_int_t i, const ae_int_t j);
709 
710 
711 /*************************************************************************
712 This function returns I-th diagonal element of the sparse matrix.
713 
714 Matrix can be in any mode (Hash-Table or CRS storage), but this function
715 is most efficient for CRS matrices - it requires less than 50 CPU cycles
716 to extract diagonal element. For Hash-Table matrices we still have O(1)
717 query time, but function is many times slower.
718 
719 INPUT PARAMETERS
720  S - sparse M*N matrix in Hash-Table or CRS
721  representation.
722  I - index of the element to modify, 0<=I<min(M,N)
723 
724 RESULT
725  value of S[I,I] or zero (in case no element with such index is found)
726 
727  -- ALGLIB PROJECT --
728  Copyright 14.10.2011 by Bochkanov Sergey
729 *************************************************************************/
730 double sparsegetdiagonal(const sparsematrix &s, const ae_int_t i);
731 
732 
733 /*************************************************************************
734 This function calculates matrix-vector product S*x. Matrix S must be
735 stored in CRS or SKS format (exception will be thrown otherwise).
736 
737 INPUT PARAMETERS
738  S - sparse M*N matrix in CRS or SKS format.
739  X - array[N], input vector. For performance reasons we
740  make only quick checks - we check that array size is
741  at least N, but we do not check for NAN's or INF's.
742  Y - output buffer, possibly preallocated. In case buffer
743  size is too small to store result, this buffer is
744  automatically resized.
745 
746 OUTPUT PARAMETERS
747  Y - array[M], S*x
748 
749 NOTE: this function throws exception when called for non-CRS/SKS matrix.
750 You must convert your matrix with SparseConvertToCRS/SKS() before using
751 this function.
752 
753  -- ALGLIB PROJECT --
754  Copyright 14.10.2011 by Bochkanov Sergey
755 *************************************************************************/
756 void sparsemv(const sparsematrix &s, const real_1d_array &x, real_1d_array &y);
757 
758 
759 /*************************************************************************
760 This function calculates matrix-vector product S^T*x. Matrix S must be
761 stored in CRS or SKS format (exception will be thrown otherwise).
762 
763 INPUT PARAMETERS
764  S - sparse M*N matrix in CRS or SKS format.
765  X - array[M], input vector. For performance reasons we
766  make only quick checks - we check that array size is
767  at least M, but we do not check for NAN's or INF's.
768  Y - output buffer, possibly preallocated. In case buffer
769  size is too small to store result, this buffer is
770  automatically resized.
771 
772 OUTPUT PARAMETERS
773  Y - array[N], S^T*x
774 
775 NOTE: this function throws exception when called for non-CRS/SKS matrix.
776 You must convert your matrix with SparseConvertToCRS/SKS() before using
777 this function.
778 
779  -- ALGLIB PROJECT --
780  Copyright 14.10.2011 by Bochkanov Sergey
781 *************************************************************************/
782 void sparsemtv(const sparsematrix &s, const real_1d_array &x, real_1d_array &y);
783 
784 
785 /*************************************************************************
786 This function simultaneously calculates two matrix-vector products:
787  S*x and S^T*x.
788 S must be square (non-rectangular) matrix stored in CRS or SKS format
789 (exception will be thrown otherwise).
790 
791 INPUT PARAMETERS
792  S - sparse N*N matrix in CRS or SKS format.
793  X - array[N], input vector. For performance reasons we
794  make only quick checks - we check that array size is
795  at least N, but we do not check for NAN's or INF's.
796  Y0 - output buffer, possibly preallocated. In case buffer
797  size is too small to store result, this buffer is
798  automatically resized.
799  Y1 - output buffer, possibly preallocated. In case buffer
800  size is too small to store result, this buffer is
801  automatically resized.
802 
803 OUTPUT PARAMETERS
804  Y0 - array[N], S*x
805  Y1 - array[N], S^T*x
806 
807 NOTE: this function throws exception when called for non-CRS/SKS matrix.
808 You must convert your matrix with SparseConvertToCRS/SKS() before using
809 this function.
810 
811  -- ALGLIB PROJECT --
812  Copyright 14.10.2011 by Bochkanov Sergey
813 *************************************************************************/
815 
816 
817 /*************************************************************************
818 This function calculates matrix-vector product S*x, when S is symmetric
819 matrix. Matrix S must be stored in CRS or SKS format (exception will be
820 thrown otherwise).
821 
822 INPUT PARAMETERS
823  S - sparse M*M matrix in CRS or SKS format.
824  IsUpper - whether upper or lower triangle of S is given:
825  * if upper triangle is given, only S[i,j] for j>=i
826  are used, and lower triangle is ignored (it can be
827  empty - these elements are not referenced at all).
828  * if lower triangle is given, only S[i,j] for j<=i
829  are used, and upper triangle is ignored.
830  X - array[N], input vector. For performance reasons we
831  make only quick checks - we check that array size is
832  at least N, but we do not check for NAN's or INF's.
833  Y - output buffer, possibly preallocated. In case buffer
834  size is too small to store result, this buffer is
835  automatically resized.
836 
837 OUTPUT PARAMETERS
838  Y - array[M], S*x
839 
840 NOTE: this function throws exception when called for non-CRS/SKS matrix.
841 You must convert your matrix with SparseConvertToCRS/SKS() before using
842 this function.
843 
844  -- ALGLIB PROJECT --
845  Copyright 14.10.2011 by Bochkanov Sergey
846 *************************************************************************/
847 void sparsesmv(const sparsematrix &s, const bool isupper, const real_1d_array &x, real_1d_array &y);
848 
849 
850 /*************************************************************************
851 This function calculates vector-matrix-vector product x'*S*x, where S is
852 symmetric matrix. Matrix S must be stored in CRS or SKS format (exception
853 will be thrown otherwise).
854 
855 INPUT PARAMETERS
856  S - sparse M*M matrix in CRS or SKS format.
857  IsUpper - whether upper or lower triangle of S is given:
858  * if upper triangle is given, only S[i,j] for j>=i
859  are used, and lower triangle is ignored (it can be
860  empty - these elements are not referenced at all).
861  * if lower triangle is given, only S[i,j] for j<=i
862  are used, and upper triangle is ignored.
863  X - array[N], input vector. For performance reasons we
864  make only quick checks - we check that array size is
865  at least N, but we do not check for NAN's or INF's.
866 
867 RESULT
868  x'*S*x
869 
870 NOTE: this function throws exception when called for non-CRS/SKS matrix.
871 You must convert your matrix with SparseConvertToCRS/SKS() before using
872 this function.
873 
874  -- ALGLIB PROJECT --
875  Copyright 27.01.2014 by Bochkanov Sergey
876 *************************************************************************/
877 double sparsevsmv(const sparsematrix &s, const bool isupper, const real_1d_array &x);
878 
879 
880 /*************************************************************************
881 This function calculates matrix-matrix product S*A. Matrix S must be
882 stored in CRS or SKS format (exception will be thrown otherwise).
883 
884 INPUT PARAMETERS
885  S - sparse M*N matrix in CRS or SKS format.
886  A - array[N][K], input dense matrix. For performance reasons
887  we make only quick checks - we check that array size
888  is at least N, but we do not check for NAN's or INF's.
889  K - number of columns of matrix (A).
890  B - output buffer, possibly preallocated. In case buffer
891  size is too small to store result, this buffer is
892  automatically resized.
893 
894 OUTPUT PARAMETERS
895  B - array[M][K], S*A
896 
897 NOTE: this function throws exception when called for non-CRS/SKS matrix.
898 You must convert your matrix with SparseConvertToCRS/SKS() before using
899 this function.
900 
901  -- ALGLIB PROJECT --
902  Copyright 14.10.2011 by Bochkanov Sergey
903 *************************************************************************/
904 void sparsemm(const sparsematrix &s, const real_2d_array &a, const ae_int_t k, real_2d_array &b);
905 
906 
907 /*************************************************************************
908 This function calculates matrix-matrix product S^T*A. Matrix S must be
909 stored in CRS or SKS format (exception will be thrown otherwise).
910 
911 INPUT PARAMETERS
912  S - sparse M*N matrix in CRS or SKS format.
913  A - array[M][K], input dense matrix. For performance reasons
914  we make only quick checks - we check that array size is
915  at least M, but we do not check for NAN's or INF's.
916  K - number of columns of matrix (A).
917  B - output buffer, possibly preallocated. In case buffer
918  size is too small to store result, this buffer is
919  automatically resized.
920 
921 OUTPUT PARAMETERS
922  B - array[N][K], S^T*A
923 
924 NOTE: this function throws exception when called for non-CRS/SKS matrix.
925 You must convert your matrix with SparseConvertToCRS/SKS() before using
926 this function.
927 
928  -- ALGLIB PROJECT --
929  Copyright 14.10.2011 by Bochkanov Sergey
930 *************************************************************************/
931 void sparsemtm(const sparsematrix &s, const real_2d_array &a, const ae_int_t k, real_2d_array &b);
932 
933 
934 /*************************************************************************
935 This function simultaneously calculates two matrix-matrix products:
936  S*A and S^T*A.
937 S must be square (non-rectangular) matrix stored in CRS or SKS format
938 (exception will be thrown otherwise).
939 
940 INPUT PARAMETERS
941  S - sparse N*N matrix in CRS or SKS format.
942  A - array[N][K], input dense matrix. For performance reasons
943  we make only quick checks - we check that array size is
944  at least N, but we do not check for NAN's or INF's.
945  K - number of columns of matrix (A).
946  B0 - output buffer, possibly preallocated. In case buffer
947  size is too small to store result, this buffer is
948  automatically resized.
949  B1 - output buffer, possibly preallocated. In case buffer
950  size is too small to store result, this buffer is
951  automatically resized.
952 
953 OUTPUT PARAMETERS
954  B0 - array[N][K], S*A
955  B1 - array[N][K], S^T*A
956 
957 NOTE: this function throws exception when called for non-CRS/SKS matrix.
958 You must convert your matrix with SparseConvertToCRS/SKS() before using
959 this function.
960 
961  -- ALGLIB PROJECT --
962  Copyright 14.10.2011 by Bochkanov Sergey
963 *************************************************************************/
964 void sparsemm2(const sparsematrix &s, const real_2d_array &a, const ae_int_t k, real_2d_array &b0, real_2d_array &b1);
965 
966 
967 /*************************************************************************
968 This function calculates matrix-matrix product S*A, when S is symmetric
969 matrix. Matrix S must be stored in CRS or SKS format (exception will be
970 thrown otherwise).
971 
972 INPUT PARAMETERS
973  S - sparse M*M matrix in CRS or SKS format.
974  IsUpper - whether upper or lower triangle of S is given:
975  * if upper triangle is given, only S[i,j] for j>=i
976  are used, and lower triangle is ignored (it can be
977  empty - these elements are not referenced at all).
978  * if lower triangle is given, only S[i,j] for j<=i
979  are used, and upper triangle is ignored.
980  A - array[M][K], input dense matrix. For performance reasons
981  we make only quick checks - we check that array size is
982  at least M, but we do not check for NAN's or INF's.
983  K - number of columns of matrix (A).
984  B - output buffer, possibly preallocated. In case buffer
985  size is too small to store result, this buffer is
986  automatically resized.
987 
988 OUTPUT PARAMETERS
989  B - array[M][K], S*A
990 
991 NOTE: this function throws exception when called for non-CRS/SKS matrix.
992 You must convert your matrix with SparseConvertToCRS/SKS() before using
993 this function.
994 
995  -- ALGLIB PROJECT --
996  Copyright 14.10.2011 by Bochkanov Sergey
997 *************************************************************************/
998 void sparsesmm(const sparsematrix &s, const bool isupper, const real_2d_array &a, const ae_int_t k, real_2d_array &b);
999 
1000 
1001 /*************************************************************************
1002 This function calculates matrix-vector product op(S)*x, when x is vector,
1003 S is triangular matrix, op(S) is transposition or no operation.
1004 Matrix S must be stored in CRS or SKS format (exception will be thrown
1005 otherwise).
1006 
1007 INPUT PARAMETERS
1008  S - sparse square matrix in CRS or SKS format.
1009  IsUpper - whether upper or lower triangle of S is used:
1010  * if upper triangle is given, only S[i,j] for j>=i
1011  are used, and lower triangle is ignored (it can be
1012  empty - these elements are not referenced at all).
1013  * if lower triangle is given, only S[i,j] for j<=i
1014  are used, and upper triangle is ignored.
1015  IsUnit - unit or non-unit diagonal:
1016  * if True, diagonal elements of triangular matrix are
1017  considered equal to 1.0. Actual elements stored in
1018  S are not referenced at all.
1019  * if False, diagonal stored in S is used
1020  OpType - operation type:
1021  * if 0, S*x is calculated
1022  * if 1, (S^T)*x is calculated (transposition)
1023  X - array[N] which stores input vector. For performance
1024  reasons we make only quick checks - we check that
1025  array size is at least N, but we do not check for
1026  NAN's or INF's.
1027  Y - possibly preallocated input buffer. Automatically
1028  resized if its size is too small.
1029 
1030 OUTPUT PARAMETERS
1031  Y - array[N], op(S)*x
1032 
1033 NOTE: this function throws exception when called for non-CRS/SKS matrix.
1034 You must convert your matrix with SparseConvertToCRS/SKS() before using
1035 this function.
1036 
1037  -- ALGLIB PROJECT --
1038  Copyright 20.01.2014 by Bochkanov Sergey
1039 *************************************************************************/
1040 void sparsetrmv(const sparsematrix &s, const bool isupper, const bool isunit, const ae_int_t optype, const real_1d_array &x, real_1d_array &y);
1041 
1042 
1043 /*************************************************************************
1044 This function solves linear system op(S)*y=x where x is vector, S is
1045 triangular matrix, op(S) is transposition or no operation.
1046 Matrix S must be stored in CRS or SKS format (exception will be thrown
1047 otherwise).
1048 
1049 INPUT PARAMETERS
1050  S - sparse square matrix in CRS or SKS format.
1051  IsUpper - whether upper or lower triangle of S is used:
1052  * if upper triangle is given, only S[i,j] for j>=i
1053  are used, and lower triangle is ignored (it can be
1054  empty - these elements are not referenced at all).
1055  * if lower triangle is given, only S[i,j] for j<=i
1056  are used, and upper triangle is ignored.
1057  IsUnit - unit or non-unit diagonal:
1058  * if True, diagonal elements of triangular matrix are
1059  considered equal to 1.0. Actual elements stored in
1060  S are not referenced at all.
1061  * if False, diagonal stored in S is used. It is your
1062  responsibility to make sure that diagonal is
1063  non-zero.
1064  OpType - operation type:
1065  * if 0, S*y=x is solved
1066  * if 1, (S^T)*y=x is solved (transposition)
1067  X - array[N] which stores input vector. For performance
1068  reasons we make only quick checks - we check that
1069  array size is at least N, but we do not check for
1070  NAN's or INF's.
1071 
1072 OUTPUT PARAMETERS
1073  X - array[N], inv(op(S))*x
1074 
1075 NOTE: this function throws exception when called for non-CRS/SKS matrix.
1076  You must convert your matrix with SparseConvertToCRS/SKS() before
1077  using this function.
1078 
1079 NOTE: no assertion or tests are done during algorithm operation. It is
1080  your responsibility to provide invertible matrix to algorithm.
1081 
1082  -- ALGLIB PROJECT --
1083  Copyright 20.01.2014 by Bochkanov Sergey
1084 *************************************************************************/
1085 void sparsetrsv(const sparsematrix &s, const bool isupper, const bool isunit, const ae_int_t optype, const real_1d_array &x);
1086 
1087 
1088 /*************************************************************************
1089 This procedure resizes Hash-Table matrix. It can be called when you have
1090 deleted too many elements from the matrix, and you want to free unneeded
1091 memory.
1092 
1093  -- ALGLIB PROJECT --
1094  Copyright 14.10.2011 by Bochkanov Sergey
1095 *************************************************************************/
1097 
1098 
1099 /*************************************************************************
1100 This function is used to enumerate all elements of the sparse matrix.
1101 Before first call user initializes T0 and T1 counters by zero. These
1102 counters are used to remember current position in a matrix; after each
1103 call they are updated by the function.
1104 
1105 Subsequent calls to this function return non-zero elements of the sparse
1106 matrix, one by one. If you enumerate CRS matrix, matrix is traversed from
1107 left to right, from top to bottom. In case you enumerate matrix stored as
1108 Hash table, elements are returned in random order.
1109 
1110 EXAMPLE
1111  > T0=0
1112  > T1=0
1113  > while SparseEnumerate(S,T0,T1,I,J,V) do
1114  > ....do something with I,J,V
1115 
1116 INPUT PARAMETERS
1117  S - sparse M*N matrix in Hash-Table or CRS representation.
1118  T0 - internal counter
1119  T1 - internal counter
1120 
1121 OUTPUT PARAMETERS
1122  T0 - new value of the internal counter
1123  T1 - new value of the internal counter
1124  I - row index of non-zero element, 0<=I<M.
1125  J - column index of non-zero element, 0<=J<N
1126  V - value of the T-th element
1127 
1128 RESULT
1129  True in case of success (next non-zero element was retrieved)
1130  False in case all non-zero elements were enumerated
1131 
1132 NOTE: you may call SparseRewriteExisting() during enumeration, but it is
1133  THE ONLY matrix modification function you can call!!! Other
1134  matrix modification functions should not be called during enumeration!
1135 
1136  -- ALGLIB PROJECT --
1137  Copyright 14.03.2012 by Bochkanov Sergey
1138 *************************************************************************/
1139 bool sparseenumerate(const sparsematrix &s, ae_int_t &t0, ae_int_t &t1, ae_int_t &i, ae_int_t &j, double &v);
1140 
1141 
1142 /*************************************************************************
1143 This function rewrites existing (non-zero) element. It returns True if
1144 element exists or False, when it is called for non-existing (zero)
1145 element.
1146 
1147 This function works with any kind of the matrix.
1148 
1149 The purpose of this function is to provide convenient thread-safe way to
1150 modify sparse matrix. Such modification (already existing element is
1151 rewritten) is guaranteed to be thread-safe without any synchronization, as
1152 long as different threads modify different elements.
1153 
1154 INPUT PARAMETERS
1155  S - sparse M*N matrix in any kind of representation
1156  (Hash, SKS, CRS).
1157  I - row index of non-zero element to modify, 0<=I<M
1158  J - column index of non-zero element to modify, 0<=J<N
1159  V - value to rewrite, must be finite number
1160 
1161 OUTPUT PARAMETERS
1162  S - modified matrix
1163 RESULT
1164  True in case when element exists
1165  False in case when element doesn't exist or it is zero
1166 
1167  -- ALGLIB PROJECT --
1168  Copyright 14.03.2012 by Bochkanov Sergey
1169 *************************************************************************/
1170 bool sparserewriteexisting(const sparsematrix &s, const ae_int_t i, const ae_int_t j, const double v);
1171 
1172 
1173 /*************************************************************************
1174 This function returns I-th row of the sparse matrix. Matrix must be stored
1175 in CRS or SKS format.
1176 
1177 INPUT PARAMETERS:
1178  S - sparse M*N matrix in CRS or SKS format
1179  I - row index, 0<=I<M
1180  IRow - output buffer, can be preallocated. In case buffer
1181  size is too small to store I-th row, it is
1182  automatically reallocated.
1183 
1184 OUTPUT PARAMETERS:
1185  IRow - array[N], I-th row.
1186 
1187 NOTE: this function has O(N) running time, where N is a column count. It
1188  allocates and fills N-element array, even though most of its
1189  elements are zero.
1190 
1191 NOTE: If you have O(non-zeros-per-row) time and memory requirements, use
1192  SparseGetCompressedRow() function. It returns data in compressed
1193  format.
1194 
1195 NOTE: when incorrect I (outside of [0,M-1]) or matrix (non CRS/SKS)
1196  is passed, this function throws exception.
1197 
1198  -- ALGLIB PROJECT --
1199  Copyright 10.12.2014 by Bochkanov Sergey
1200 *************************************************************************/
1201 void sparsegetrow(const sparsematrix &s, const ae_int_t i, real_1d_array &irow);
1202 
1203 
1204 /*************************************************************************
1205 This function returns I-th row of the sparse matrix IN COMPRESSED FORMAT -
1206 only non-zero elements are returned (with their indexes). Matrix must be
1207 stored in CRS or SKS format.
1208 
1209 INPUT PARAMETERS:
1210  S - sparse M*N matrix in CRS or SKS format
1211  I - row index, 0<=I<M
1212  ColIdx - output buffer for column indexes, can be preallocated.
1213  In case buffer size is too small to store I-th row, it
1214  is automatically reallocated.
1215  Vals - output buffer for values, can be preallocated. In case
1216  buffer size is too small to store I-th row, it is
1217  automatically reallocated.
1218 
1219 OUTPUT PARAMETERS:
1220  ColIdx - column indexes of non-zero elements, sorted in
1221  ascending order. Symbolically non-zero elements are counted
1222  (i.e. if you allocated place for element, but it has
1223  zero numerical value - it is counted).
1224  Vals - values. Vals[K] stores value of matrix element with
1225  indexes (I,ColIdx[K]). Symbolically non-zero elements
1226  are counted (i.e. if you allocated place for element,
1227  but it has zero numerical value - it is counted).
1228  NZCnt - number of symbolically non-zero elements per row.
1229 
1230 NOTE: when incorrect I (outside of [0,M-1]) or matrix (non CRS/SKS)
1231  is passed, this function throws exception.
1232 
1233 NOTE: this function may allocate additional, unnecessary place for ColIdx
1234  and Vals arrays. It is dictated by performance reasons - on SKS
1235  matrices it is faster to allocate space at the beginning with
1236  some "extra"-space, than performing two passes over matrix - first
1237  time to calculate exact space required for data, second time - to
1238  store data itself.
1239 
1240  -- ALGLIB PROJECT --
1241  Copyright 10.12.2014 by Bochkanov Sergey
1242 *************************************************************************/
1243 void sparsegetcompressedrow(const sparsematrix &s, const ae_int_t i, integer_1d_array &colidx, real_1d_array &vals, ae_int_t &nzcnt);
1244 
1245 
1246 /*************************************************************************
1247 This function performs efficient in-place transpose of SKS matrix. No
1248 additional memory is allocated during transposition.
1249 
1250 This function supports only skyline storage format (SKS).
1251 
1252 INPUT PARAMETERS
1253  S - sparse matrix in SKS format.
1254 
1255 OUTPUT PARAMETERS
1256  S - sparse matrix, transposed.
1257 
1258  -- ALGLIB PROJECT --
1259  Copyright 16.01.2014 by Bochkanov Sergey
1260 *************************************************************************/
1262 
1263 
1264 /*************************************************************************
1265 This function performs in-place conversion to desired sparse storage
1266 format.
1267 
1268 INPUT PARAMETERS
1269  S0 - sparse matrix in any format.
1270  Fmt - desired storage format of the output, as returned by
1271  SparseGetMatrixType() function:
1272  * 0 for hash-based storage
1273  * 1 for CRS
1274  * 2 for SKS
1275 
1276 OUTPUT PARAMETERS
1277  S0 - sparse matrix in requested format.
1278 
1279 NOTE: in-place conversion wastes a lot of memory which is used to store
1280  temporaries. If you perform a lot of repeated conversions, we
1281  recommend to use out-of-place buffered conversion functions, like
1282  SparseCopyToBuf(), which can reuse already allocated memory.
1283 
1284  -- ALGLIB PROJECT --
1285  Copyright 16.01.2014 by Bochkanov Sergey
1286 *************************************************************************/
1287 void sparseconvertto(const sparsematrix &s0, const ae_int_t fmt);
1288 
1289 
1290 /*************************************************************************
1291 This function performs out-of-place conversion to desired sparse storage
1292 format. S0 is copied to S1 and converted on-the-fly. Memory allocated in
1293 S1 is reused to maximum extent possible.
1294 
1295 INPUT PARAMETERS
1296  S0 - sparse matrix in any format.
1297  Fmt - desired storage format of the output, as returned by
1298  SparseGetMatrixType() function:
1299  * 0 for hash-based storage
1300  * 1 for CRS
1301  * 2 for SKS
1302 
1303 OUTPUT PARAMETERS
1304  S1 - sparse matrix in requested format.
1305 
1306  -- ALGLIB PROJECT --
1307  Copyright 16.01.2014 by Bochkanov Sergey
1308 *************************************************************************/
1309 void sparsecopytobuf(const sparsematrix &s0, const ae_int_t fmt, const sparsematrix &s1);
1310 
1311 
1312 /*************************************************************************
1313 This function performs in-place conversion to Hash table storage.
1314 
1315 INPUT PARAMETERS
1316  S - sparse matrix in CRS format.
1317 
1318 OUTPUT PARAMETERS
1319  S - sparse matrix in Hash table format.
1320 
1321 NOTE: this function has no effect when called with matrix which is
1322  already in Hash table mode.
1323 
1324 NOTE: in-place conversion involves allocation of temporary arrays. If you
1325  perform a lot of repeated in-place conversions, it may lead to
1326  memory fragmentation. Consider using out-of-place SparseCopyToHashBuf()
1327  function in this case.
1328 
1329  -- ALGLIB PROJECT --
1330  Copyright 20.07.2012 by Bochkanov Sergey
1331 *************************************************************************/
1333 
1334 
1335 /*************************************************************************
1336 This function performs out-of-place conversion to Hash table storage
1337 format. S0 is copied to S1 and converted on-the-fly.
1338 
1339 INPUT PARAMETERS
1340  S0 - sparse matrix in any format.
1341 
1342 OUTPUT PARAMETERS
1343  S1 - sparse matrix in Hash table format.
1344 
1345 NOTE: if S0 is stored as Hash-table, it is just copied without conversion.
1346 
1347 NOTE: this function de-allocates memory occupied by S1 before starting
1348  conversion. If you perform a lot of repeated conversions, it may
1349  lead to memory fragmentation. In this case we recommend you to use
1350  SparseCopyToHashBuf() function which re-uses memory in S1 as much as
1351  possible.
1352 
1353  -- ALGLIB PROJECT --
1354  Copyright 20.07.2012 by Bochkanov Sergey
1355 *************************************************************************/
1357 
1358 
1359 /*************************************************************************
1360 This function performs out-of-place conversion to Hash table storage
1361 format. S0 is copied to S1 and converted on-the-fly. Memory allocated in
1362 S1 is reused to maximum extent possible.
1363 
1364 INPUT PARAMETERS
1365  S0 - sparse matrix in any format.
1366 
1367 OUTPUT PARAMETERS
1368  S1 - sparse matrix in Hash table format.
1369 
1370 NOTE: if S0 is stored as Hash-table, it is just copied without conversion.
1371 
1372  -- ALGLIB PROJECT --
1373  Copyright 20.07.2012 by Bochkanov Sergey
1374 *************************************************************************/
1375 void sparsecopytohashbuf(const sparsematrix &s0, const sparsematrix &s1);
1376 
1377 
1378 /*************************************************************************
1379 This function converts matrix to CRS format.
1380 
1381 Some algorithms (linear algebra ones, for example) require matrices in
1382 CRS format. This function allows to perform in-place conversion.
1383 
1384 INPUT PARAMETERS
1385  S - sparse M*N matrix in any format
1386 
1387 OUTPUT PARAMETERS
1388  S - matrix in CRS format
1389 
1390 NOTE: this function has no effect when called with matrix which is
1391  already in CRS mode.
1392 
1393 NOTE: this function allocates temporary memory to store a copy of the
1394  matrix. If you perform a lot of repeated conversions, we recommend
1395  you to use SparseCopyToCRSBuf() function, which can reuse
1396  previously allocated memory.
1397 
1398  -- ALGLIB PROJECT --
1399  Copyright 14.10.2011 by Bochkanov Sergey
1400 *************************************************************************/
1402 
1403 
1404 /*************************************************************************
1405 This function performs out-of-place conversion to CRS format. S0 is
1406 copied to S1 and converted on-the-fly.
1407 
1408 INPUT PARAMETERS
1409  S0 - sparse matrix in any format.
1410 
1411 OUTPUT PARAMETERS
1412  S1 - sparse matrix in CRS format.
1413 
1414 NOTE: if S0 is stored as CRS, it is just copied without conversion.
1415 
1416 NOTE: this function de-allocates memory occupied by S1 before starting CRS
1417  conversion. If you perform a lot of repeated CRS conversions, it may
1418  lead to memory fragmentation. In this case we recommend you to use
1419  SparseCopyToCRSBuf() function which re-uses memory in S1 as much as
1420  possible.
1421 
1422  -- ALGLIB PROJECT --
1423  Copyright 20.07.2012 by Bochkanov Sergey
1424 *************************************************************************/
1426 
1427 
1428 /*************************************************************************
1429 This function performs out-of-place conversion to CRS format. S0 is
1430 copied to S1 and converted on-the-fly. Memory allocated in S1 is reused to
1431 maximum extent possible.
1432 
1433 INPUT PARAMETERS
1434  S0 - sparse matrix in any format.
1435  S1 - matrix which may contain some pre-allocated memory, or
1436  can be just uninitialized structure.
1437 
1438 OUTPUT PARAMETERS
1439  S1 - sparse matrix in CRS format.
1440 
1441 NOTE: if S0 is stored as CRS, it is just copied without conversion.
1442 
1443  -- ALGLIB PROJECT --
1444  Copyright 20.07.2012 by Bochkanov Sergey
1445 *************************************************************************/
1446 void sparsecopytocrsbuf(const sparsematrix &s0, const sparsematrix &s1);
1447 
1448 
1449 /*************************************************************************
1450 This function performs in-place conversion to SKS format.
1451 
1452 INPUT PARAMETERS
1453  S - sparse matrix in any format.
1454 
1455 OUTPUT PARAMETERS
1456  S - sparse matrix in SKS format.
1457 
1458 NOTE: this function has no effect when called with matrix which is
1459  already in SKS mode.
1460 
1461 NOTE: in-place conversion involves allocation of temporary arrays. If you
1462  perform a lot of repeated in-place conversions, it may lead to
1463  memory fragmentation. Consider using out-of-place SparseCopyToSKSBuf()
1464  function in this case.
1465 
1466  -- ALGLIB PROJECT --
1467  Copyright 15.01.2014 by Bochkanov Sergey
1468 *************************************************************************/
1470 
1471 
1472 /*************************************************************************
1473 This function performs out-of-place conversion to SKS storage format.
1474 S0 is copied to S1 and converted on-the-fly.
1475 
1476 INPUT PARAMETERS
1477  S0 - sparse matrix in any format.
1478 
1479 OUTPUT PARAMETERS
1480  S1 - sparse matrix in SKS format.
1481 
1482 NOTE: if S0 is stored as SKS, it is just copied without conversion.
1483 
1484 NOTE: this function de-allocates memory occupied by S1 before starting
1485  conversion. If you perform a lot of repeated conversions, it may
1486  lead to memory fragmentation. In this case we recommend you to use
1487  SparseCopyToSKSBuf() function which re-uses memory in S1 as much as
1488  possible.
1489 
1490  -- ALGLIB PROJECT --
1491  Copyright 20.07.2012 by Bochkanov Sergey
1492 *************************************************************************/
1494 
1495 
1496 /*************************************************************************
1497 This function performs out-of-place conversion to SKS format. S0 is
1498 copied to S1 and converted on-the-fly. Memory allocated in S1 is reused
1499 to maximum extent possible.
1500 
1501 INPUT PARAMETERS
1502  S0 - sparse matrix in any format.
1503 
1504 OUTPUT PARAMETERS
1505  S1 - sparse matrix in SKS format.
1506 
1507 NOTE: if S0 is stored as SKS, it is just copied without conversion.
1508 
1509  -- ALGLIB PROJECT --
1510  Copyright 20.07.2012 by Bochkanov Sergey
1511 *************************************************************************/
1512 void sparsecopytosksbuf(const sparsematrix &s0, const sparsematrix &s1);
1513 
1514 
1515 /*************************************************************************
1516 This function returns type of the matrix storage format.
1517 
1518 INPUT PARAMETERS:
1519  S - sparse matrix.
1520 
1521 RESULT:
1522  sparse storage format used by matrix:
1523  0 - Hash-table
1524  1 - CRS (compressed row storage)
1525  2 - SKS (skyline)
1526 
1527 NOTE: future versions of ALGLIB may include additional sparse storage
1528  formats.
1529 
1530 
1531  -- ALGLIB PROJECT --
1532  Copyright 20.07.2012 by Bochkanov Sergey
1533 *************************************************************************/
1535 
1536 
1537 /*************************************************************************
1538 This function checks matrix storage format and returns True when matrix is
1539 stored using Hash table representation.
1540 
1541 INPUT PARAMETERS:
1542  S - sparse matrix.
1543 
1544 RESULT:
1545  True if matrix type is Hash table
1546  False if matrix type is not Hash table
1547 
1548  -- ALGLIB PROJECT --
1549  Copyright 20.07.2012 by Bochkanov Sergey
1550 *************************************************************************/
1552 
1553 
1554 /*************************************************************************
1555 This function checks matrix storage format and returns True when matrix is
1556 stored using CRS representation.
1557 
1558 INPUT PARAMETERS:
1559  S - sparse matrix.
1560 
1561 RESULT:
1562  True if matrix type is CRS
1563  False if matrix type is not CRS
1564 
1565  -- ALGLIB PROJECT --
1566  Copyright 20.07.2012 by Bochkanov Sergey
1567 *************************************************************************/
1568 bool sparseiscrs(const sparsematrix &s);
1569 
1570 
1571 /*************************************************************************
1572 This function checks matrix storage format and returns True when matrix is
1573 stored using SKS representation.
1574 
1575 INPUT PARAMETERS:
1576  S - sparse matrix.
1577 
1578 RESULT:
1579  True if matrix type is SKS
1580  False if matrix type is not SKS
1581 
1582  -- ALGLIB PROJECT --
1583  Copyright 20.07.2012 by Bochkanov Sergey
1584 *************************************************************************/
1585 bool sparseissks(const sparsematrix &s);
1586 
1587 
1588 /*************************************************************************
1589 The function frees all memory occupied by sparse matrix. Sparse matrix
1590 structure becomes unusable after this call.
1591 
1592 OUTPUT PARAMETERS
1593  S - sparse matrix to delete
1594 
1595  -- ALGLIB PROJECT --
1596  Copyright 24.07.2012 by Bochkanov Sergey
1597 *************************************************************************/
1599 
1600 
1601 /*************************************************************************
1602 The function returns number of rows of a sparse matrix.
1603 
1604 RESULT: number of rows of a sparse matrix.
1605 
1606  -- ALGLIB PROJECT --
1607  Copyright 23.08.2012 by Bochkanov Sergey
1608 *************************************************************************/
1610 
1611 
1612 /*************************************************************************
1613 The function returns number of columns of a sparse matrix.
1614 
1615 RESULT: number of columns of a sparse matrix.
1616 
1617  -- ALGLIB PROJECT --
1618  Copyright 23.08.2012 by Bochkanov Sergey
1619 *************************************************************************/
1621 
1622 
1623 /*************************************************************************
1624 The function returns number of strictly upper triangular non-zero elements
1625 in the matrix. It counts SYMBOLICALLY non-zero elements, i.e. entries
1626 in the sparse matrix data structure. If some element has zero numerical
1627 value, it is still counted.
1628 
1629 This function has different cost for different types of matrices:
1630 * for hash-based matrices it involves complete pass over entire hash-table
1631  with O(NNZ) cost, where NNZ is number of non-zero elements
1632 * for CRS and SKS matrix types cost of counting is O(N) (N - matrix size).
1633 
1634 RESULT: number of non-zero elements strictly above main diagonal
1635 
1636  -- ALGLIB PROJECT --
1637  Copyright 12.02.2014 by Bochkanov Sergey
1638 *************************************************************************/
1640 
1641 
1642 /*************************************************************************
1643 The function returns number of strictly lower triangular non-zero elements
1644 in the matrix. It counts SYMBOLICALLY non-zero elements, i.e. entries
1645 in the sparse matrix data structure. If some element has zero numerical
1646 value, it is still counted.
1647 
1648 This function has different cost for different types of matrices:
1649 * for hash-based matrices it involves complete pass over entire hash-table
1650  with O(NNZ) cost, where NNZ is number of non-zero elements
1651 * for CRS and SKS matrix types cost of counting is O(N) (N - matrix size).
1652 
1653 RESULT: number of non-zero elements strictly below main diagonal
1654 
1655  -- ALGLIB PROJECT --
1656  Copyright 12.02.2014 by Bochkanov Sergey
1657 *************************************************************************/
1659 
1660 /*************************************************************************
1661 Generation of a random uniformly distributed (Haar) orthogonal matrix
1662 
1663 INPUT PARAMETERS:
1664  N - matrix size, N>=1
1665 
1666 OUTPUT PARAMETERS:
1667  A - orthogonal NxN matrix, array[0..N-1,0..N-1]
1668 
1669 NOTE: this function uses algorithm described in Stewart, G. W. (1980),
1670  "The Efficient Generation of Random Orthogonal Matrices with an
1671  Application to Condition Estimators".
1672 
1673  Speaking short, to generate an (N+1)x(N+1) orthogonal matrix, it:
1674  * takes an NxN one
1675  * takes uniformly distributed unit vector of dimension N+1.
1676  * constructs a Householder reflection from the vector, then applies
1677  it to the smaller matrix (embedded in the larger size with a 1 at
1678  the bottom right corner).
1679 
1680  -- ALGLIB routine --
1681  04.12.2009
1682  Bochkanov Sergey
1683 *************************************************************************/
1685 
1686 
1687 /*************************************************************************
1688 Generation of random NxN matrix with given condition number and norm2(A)=1
1689 
1690 INPUT PARAMETERS:
1691  N - matrix size
1692  C - condition number (in 2-norm)
1693 
1694 OUTPUT PARAMETERS:
1695  A - random matrix with norm2(A)=1 and cond(A)=C
1696 
1697  -- ALGLIB routine --
1698  04.12.2009
1699  Bochkanov Sergey
1700 *************************************************************************/
1701 void rmatrixrndcond(const ae_int_t n, const double c, real_2d_array &a);
1702 
1703 
1704 /*************************************************************************
1705 Generation of a random Haar distributed orthogonal complex matrix
1706 
1707 INPUT PARAMETERS:
1708  N - matrix size, N>=1
1709 
1710 OUTPUT PARAMETERS:
1711  A - orthogonal NxN matrix, array[0..N-1,0..N-1]
1712 
1713 NOTE: this function uses algorithm described in Stewart, G. W. (1980),
1714  "The Efficient Generation of Random Orthogonal Matrices with an
1715  Application to Condition Estimators".
1716 
1717  Speaking short, to generate an (N+1)x(N+1) orthogonal matrix, it:
1718  * takes an NxN one
1719  * takes uniformly distributed unit vector of dimension N+1.
1720  * constructs a Householder reflection from the vector, then applies
1721  it to the smaller matrix (embedded in the larger size with a 1 at
1722  the bottom right corner).
1723 
1724  -- ALGLIB routine --
1725  04.12.2009
1726  Bochkanov Sergey
1727 *************************************************************************/
1729 
1730 
1731 /*************************************************************************
1732 Generation of random NxN complex matrix with given condition number C and
1733 norm2(A)=1
1734 
1735 INPUT PARAMETERS:
1736  N - matrix size
1737  C - condition number (in 2-norm)
1738 
1739 OUTPUT PARAMETERS:
1740  A - random matrix with norm2(A)=1 and cond(A)=C
1741 
1742  -- ALGLIB routine --
1743  04.12.2009
1744  Bochkanov Sergey
1745 *************************************************************************/
1746 void cmatrixrndcond(const ae_int_t n, const double c, complex_2d_array &a);
1747 
1748 
1749 /*************************************************************************
1750 Generation of random NxN symmetric matrix with given condition number and
1751 norm2(A)=1
1752 
1753 INPUT PARAMETERS:
1754  N - matrix size
1755  C - condition number (in 2-norm)
1756 
1757 OUTPUT PARAMETERS:
1758  A - random matrix with norm2(A)=1 and cond(A)=C
1759 
1760  -- ALGLIB routine --
1761  04.12.2009
1762  Bochkanov Sergey
1763 *************************************************************************/
1764 void smatrixrndcond(const ae_int_t n, const double c, real_2d_array &a);
1765 
1766 
1767 /*************************************************************************
1768 Generation of random NxN symmetric positive definite matrix with given
1769 condition number and norm2(A)=1
1770 
1771 INPUT PARAMETERS:
1772  N - matrix size
1773  C - condition number (in 2-norm)
1774 
1775 OUTPUT PARAMETERS:
1776  A - random SPD matrix with norm2(A)=1 and cond(A)=C
1777 
1778  -- ALGLIB routine --
1779  04.12.2009
1780  Bochkanov Sergey
1781 *************************************************************************/
1782 void spdmatrixrndcond(const ae_int_t n, const double c, real_2d_array &a);
1783 
1784 
1785 /*************************************************************************
1786 Generation of random NxN Hermitian matrix with given condition number and
1787 norm2(A)=1
1788 
1789 INPUT PARAMETERS:
1790  N - matrix size
1791  C - condition number (in 2-norm)
1792 
1793 OUTPUT PARAMETERS:
1794  A - random matrix with norm2(A)=1 and cond(A)=C
1795 
1796  -- ALGLIB routine --
1797  04.12.2009
1798  Bochkanov Sergey
1799 *************************************************************************/
1800 void hmatrixrndcond(const ae_int_t n, const double c, complex_2d_array &a);
1801 
1802 
1803 /*************************************************************************
1804 Generation of random NxN Hermitian positive definite matrix with given
1805 condition number and norm2(A)=1
1806 
1807 INPUT PARAMETERS:
1808  N - matrix size
1809  C - condition number (in 2-norm)
1810 
1811 OUTPUT PARAMETERS:
1812  A - random HPD matrix with norm2(A)=1 and cond(A)=C
1813 
1814  -- ALGLIB routine --
1815  04.12.2009
1816  Bochkanov Sergey
1817 *************************************************************************/
1818 void hpdmatrixrndcond(const ae_int_t n, const double c, complex_2d_array &a);
1819 
1820 
1821 /*************************************************************************
1822 Multiplication of MxN matrix by NxN random Haar distributed orthogonal matrix
1823 
1824 INPUT PARAMETERS:
1825  A - matrix, array[0..M-1, 0..N-1]
1826  M, N- matrix size
1827 
1828 OUTPUT PARAMETERS:
1829  A - A*Q, where Q is random NxN orthogonal matrix
1830 
1831  -- ALGLIB routine --
1832  04.12.2009
1833  Bochkanov Sergey
1834 *************************************************************************/
1835 void rmatrixrndorthogonalfromtheright(real_2d_array &a, const ae_int_t m, const ae_int_t n);
1836 
1837 
1838 /*************************************************************************
1839 Multiplication of MxN matrix by MxM random Haar distributed orthogonal matrix
1840 
1841 INPUT PARAMETERS:
1842  A - matrix, array[0..M-1, 0..N-1]
1843  M, N- matrix size
1844 
1845 OUTPUT PARAMETERS:
1846  A - Q*A, where Q is random MxM orthogonal matrix
1847 
1848  -- ALGLIB routine --
1849  04.12.2009
1850  Bochkanov Sergey
1851 *************************************************************************/
1852 void rmatrixrndorthogonalfromtheleft(real_2d_array &a, const ae_int_t m, const ae_int_t n);
1853 
1854 
1855 /*************************************************************************
1856 Multiplication of MxN complex matrix by NxN random Haar distributed
1857 complex orthogonal matrix
1858 
1859 INPUT PARAMETERS:
1860  A - matrix, array[0..M-1, 0..N-1]
1861  M, N- matrix size
1862 
1863 OUTPUT PARAMETERS:
1864  A - A*Q, where Q is random NxN orthogonal matrix
1865 
1866  -- ALGLIB routine --
1867  04.12.2009
1868  Bochkanov Sergey
1869 *************************************************************************/
1870 void cmatrixrndorthogonalfromtheright(complex_2d_array &a, const ae_int_t m, const ae_int_t n);
1871 
1872 
1873 /*************************************************************************
1874 Multiplication of MxN complex matrix by MxM random Haar distributed
1875 complex orthogonal matrix
1876 
1877 INPUT PARAMETERS:
1878  A - matrix, array[0..M-1, 0..N-1]
1879  M, N- matrix size
1880 
1881 OUTPUT PARAMETERS:
1882  A - Q*A, where Q is random MxM orthogonal matrix
1883 
1884  -- ALGLIB routine --
1885  04.12.2009
1886  Bochkanov Sergey
1887 *************************************************************************/
1888 void cmatrixrndorthogonalfromtheleft(complex_2d_array &a, const ae_int_t m, const ae_int_t n);
1889 
1890 
1891 /*************************************************************************
1892 Symmetric multiplication of NxN matrix by random Haar distributed
1893 orthogonal matrix
1894 
1895 INPUT PARAMETERS:
1896  A - matrix, array[0..N-1, 0..N-1]
1897  N - matrix size
1898 
1899 OUTPUT PARAMETERS:
1900  A - Q'*A*Q, where Q is random NxN orthogonal matrix
1901 
1902  -- ALGLIB routine --
1903  04.12.2009
1904  Bochkanov Sergey
1905 *************************************************************************/
1906 void smatrixrndmultiply(real_2d_array &a, const ae_int_t n);
1907 
1908 
1909 /*************************************************************************
1910 Hermitian multiplication of NxN matrix by random Haar distributed
1911 complex orthogonal matrix
1912 
1913 INPUT PARAMETERS:
1914  A - matrix, array[0..N-1, 0..N-1]
1915  N - matrix size
1916 
1917 OUTPUT PARAMETERS:
1918  A - Q^H*A*Q, where Q is random NxN orthogonal matrix
1919 
1920  -- ALGLIB routine --
1921  04.12.2009
1922  Bochkanov Sergey
1923 *************************************************************************/
1924 void hmatrixrndmultiply(complex_2d_array &a, const ae_int_t n);
1925 
1926 /*************************************************************************
1927 Cache-oblivious complex "copy-and-transpose"
1928 
1929 Input parameters:
1930  M - number of rows
1931  N - number of columns
1932  A - source matrix, MxN submatrix is copied and transposed
1933  IA - submatrix offset (row index)
1934  JA - submatrix offset (column index)
1935  B - destination matrix, must be large enough to store result
1936  IB - submatrix offset (row index)
1937  JB - submatrix offset (column index)
1938 *************************************************************************/
1939 void cmatrixtranspose(const ae_int_t m, const ae_int_t n, const complex_2d_array &a, const ae_int_t ia, const ae_int_t ja, complex_2d_array &b, const ae_int_t ib, const ae_int_t jb);
1940 
1941 
1942 /*************************************************************************
1943 Cache-oblivious real "copy-and-transpose"
1944 
1945 Input parameters:
1946  M - number of rows
1947  N - number of columns
1948  A - source matrix, MxN submatrix is copied and transposed
1949  IA - submatrix offset (row index)
1950  JA - submatrix offset (column index)
1951  B - destination matrix, must be large enough to store result
1952  IB - submatrix offset (row index)
1953  JB - submatrix offset (column index)
1954 *************************************************************************/
1955 void rmatrixtranspose(const ae_int_t m, const ae_int_t n, const real_2d_array &a, const ae_int_t ia, const ae_int_t ja, const real_2d_array &b, const ae_int_t ib, const ae_int_t jb);
1956 
1957 
1958 /*************************************************************************
1959 This code enforces symmetry of the matrix by copying the upper part to the lower
1960 one (or vice versa).
1961 
1962 INPUT PARAMETERS:
1963  A - matrix
1964  N - number of rows/columns
1965  IsUpper - whether we want to copy upper triangle to lower one (True)
1966  or vice versa (False).
1967 *************************************************************************/
1968 void rmatrixenforcesymmetricity(const real_2d_array &a, const ae_int_t n, const bool isupper);
1969 
1970 
1971 /*************************************************************************
1972 Copy
1973 
1974 Input parameters:
1975  M - number of rows
1976  N - number of columns
1977  A - source matrix, MxN submatrix is copied
1978  IA - submatrix offset (row index)
1979  JA - submatrix offset (column index)
1980  B - destination matrix, must be large enough to store result
1981  IB - submatrix offset (row index)
1982  JB - submatrix offset (column index)
1983 *************************************************************************/
1984 void cmatrixcopy(const ae_int_t m, const ae_int_t n, const complex_2d_array &a, const ae_int_t ia, const ae_int_t ja, complex_2d_array &b, const ae_int_t ib, const ae_int_t jb);
1985 
1986 
1987 /*************************************************************************
1988 Copy
1989 
1990 Input parameters:
1991  M - number of rows
1992  N - number of columns
1993  A - source matrix, MxN submatrix is copied
1994  IA - submatrix offset (row index)
1995  JA - submatrix offset (column index)
1996  B - destination matrix, must be large enough to store result
1997  IB - submatrix offset (row index)
1998  JB - submatrix offset (column index)
1999 *************************************************************************/
2000 void rmatrixcopy(const ae_int_t m, const ae_int_t n, const real_2d_array &a, const ae_int_t ia, const ae_int_t ja, real_2d_array &b, const ae_int_t ib, const ae_int_t jb);
2001 
2002 
2003 /*************************************************************************
2004 Rank-1 correction: A := A + u*v'
2005 
2006 INPUT PARAMETERS:
2007  M - number of rows
2008  N - number of columns
2009  A - target matrix, MxN submatrix is updated
2010  IA - submatrix offset (row index)
2011  JA - submatrix offset (column index)
2012  U - vector #1
2013  IU - subvector offset
2014  V - vector #2
2015  IV - subvector offset
2016 *************************************************************************/
2017 void cmatrixrank1(const ae_int_t m, const ae_int_t n, complex_2d_array &a, const ae_int_t ia, const ae_int_t ja, complex_1d_array &u, const ae_int_t iu, complex_1d_array &v, const ae_int_t iv);
2018 
2019 
2020 /*************************************************************************
2021 Rank-1 correction: A := A + u*v'
2022 
2023 INPUT PARAMETERS:
2024  M - number of rows
2025  N - number of columns
2026  A - target matrix, MxN submatrix is updated
2027  IA - submatrix offset (row index)
2028  JA - submatrix offset (column index)
2029  U - vector #1
2030  IU - subvector offset
2031  V - vector #2
2032  IV - subvector offset
2033 *************************************************************************/
2034 void rmatrixrank1(const ae_int_t m, const ae_int_t n, real_2d_array &a, const ae_int_t ia, const ae_int_t ja, real_1d_array &u, const ae_int_t iu, real_1d_array &v, const ae_int_t iv);
2035 
2036 
2037 /*************************************************************************
2038 Matrix-vector product: y := op(A)*x
2039 
2040 INPUT PARAMETERS:
2041  M - number of rows of op(A)
2042  M>=0
2043  N - number of columns of op(A)
2044  N>=0
2045  A - target matrix
2046  IA - submatrix offset (row index)
2047  JA - submatrix offset (column index)
2048  OpA - operation type:
2049  * OpA=0 => op(A) = A
2050  * OpA=1 => op(A) = A^T
2051  * OpA=2 => op(A) = A^H
2052  X - input vector
2053  IX - subvector offset
2054  IY - subvector offset
2055  Y - preallocated vector, must be large enough to store result
2056 
2057 OUTPUT PARAMETERS:
2058  Y - vector which stores result
2059 
2060 if M=0, then subroutine does nothing.
2061 if N=0, Y is filled by zeros.
2062 
2063 
2064  -- ALGLIB routine --
2065 
2066  28.01.2010
2067  Bochkanov Sergey
2068 *************************************************************************/
2069 void cmatrixmv(const ae_int_t m, const ae_int_t n, const complex_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t opa, const complex_1d_array &x, const ae_int_t ix, complex_1d_array &y, const ae_int_t iy);
2070 
2071 
2072 /*************************************************************************
2073 Matrix-vector product: y := op(A)*x
2074 
2075 INPUT PARAMETERS:
2076  M - number of rows of op(A)
2077  N - number of columns of op(A)
2078  A - target matrix
2079  IA - submatrix offset (row index)
2080  JA - submatrix offset (column index)
2081  OpA - operation type:
2082  * OpA=0 => op(A) = A
2083  * OpA=1 => op(A) = A^T
2084  X - input vector
2085  IX - subvector offset
2086  IY - subvector offset
2087  Y - preallocated vector, must be large enough to store result
2088 
2089 OUTPUT PARAMETERS:
2090  Y - vector which stores result
2091 
2092 if M=0, then subroutine does nothing.
2093 if N=0, Y is filled by zeros.
2094 
2095 
2096  -- ALGLIB routine --
2097 
2098  28.01.2010
2099  Bochkanov Sergey
2100 *************************************************************************/
2101 void rmatrixmv(const ae_int_t m, const ae_int_t n, const real_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t opa, const real_1d_array &x, const ae_int_t ix, real_1d_array &y, const ae_int_t iy);
2102 
2103 
2104 /*************************************************************************
2105 This subroutine calculates X*op(A^-1) where:
2106 * X is MxN general matrix
2107 * A is NxN upper/lower triangular/unitriangular matrix
2108 * "op" may be identity transformation, transposition, conjugate transposition
2109 
2110 Multiplication result replaces X.
2111 Cache-oblivious algorithm is used.
2112 
2113 COMMERCIAL EDITION OF ALGLIB:
2114 
2115  ! Commercial version of ALGLIB includes two important improvements of
2116  ! this function, which can be used from C++ and C#:
2117  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
2118  ! * multicore support
2119  !
2120  ! Intel MKL gives approximately constant (with respect to number of
2121  ! worker threads) acceleration factor which depends on CPU being used,
2122  ! problem size and "baseline" ALGLIB edition which is used for
2123  ! comparison.
2124  !
2125  ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be:
2126  ! * about 2-3x faster than ALGLIB for C++ without MKL
2127  ! * about 7-10x faster than "pure C#" edition of ALGLIB
2128  ! Difference in performance will be more striking on newer CPU's with
2129  ! support for newer SIMD instructions.
2130  !
2131  ! Commercial edition of ALGLIB also supports multithreaded acceleration
2132  ! of this function. Because starting/stopping worker thread always
2133  ! involves some overhead, parallelism starts to be profitable for N's
2134  ! larger than 128.
2135  !
2136  ! In order to use multicore features you have to:
2137  ! * use commercial version of ALGLIB
2138  ! * call this function with "smp_" prefix, which indicates that
2139  ! multicore code will be used (for multicore support)
2140  !
2141  ! We recommend you to read 'Working with commercial version' section of
2142  ! ALGLIB Reference Manual in order to find out how to use performance-
2143  ! related features provided by commercial edition of ALGLIB.
2144 
2145 INPUT PARAMETERS
2146  N - matrix size, N>=0
2147  M - matrix size, M>=0
2148  A - matrix, actual matrix is stored in A[I1:I1+N-1,J1:J1+N-1]
2149  I1 - submatrix offset
2150  J1 - submatrix offset
2151  IsUpper - whether matrix is upper triangular
2152  IsUnit - whether matrix is unitriangular
2153  OpType - transformation type:
2154  * 0 - no transformation
2155  * 1 - transposition
2156  * 2 - conjugate transposition
2157  X - matrix, actual matrix is stored in X[I2:I2+M-1,J2:J2+N-1]
2158  I2 - submatrix offset
2159  J2 - submatrix offset
2160 
2161  -- ALGLIB routine --
2162  15.12.2009
2163  Bochkanov Sergey
2164 *************************************************************************/
2165 void cmatrixrighttrsm(const ae_int_t m, const ae_int_t n, const complex_2d_array &a, const ae_int_t i1, const ae_int_t j1, const bool isupper, const bool isunit, const ae_int_t optype, const complex_2d_array &x, const ae_int_t i2, const ae_int_t j2);
2166 void smp_cmatrixrighttrsm(const ae_int_t m, const ae_int_t n, const complex_2d_array &a, const ae_int_t i1, const ae_int_t j1, const bool isupper, const bool isunit, const ae_int_t optype, const complex_2d_array &x, const ae_int_t i2, const ae_int_t j2);
2167 
2168 
2169 /*************************************************************************
2170 This subroutine calculates op(A^-1)*X where:
2171 * X is MxN general matrix
2172 * A is MxM upper/lower triangular/unitriangular matrix
2173 * "op" may be identity transformation, transposition, conjugate transposition
2174 
2175 Multiplication result replaces X.
2176 Cache-oblivious algorithm is used.
2177 
2178 COMMERCIAL EDITION OF ALGLIB:
2179 
2180  ! Commercial version of ALGLIB includes two important improvements of
2181  ! this function, which can be used from C++ and C#:
2182  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
2183  ! * multicore support
2184  !
2185  ! Intel MKL gives approximately constant (with respect to number of
2186  ! worker threads) acceleration factor which depends on CPU being used,
2187  ! problem size and "baseline" ALGLIB edition which is used for
2188  ! comparison.
2189  !
2190  ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be:
2191  ! * about 2-3x faster than ALGLIB for C++ without MKL
2192  ! * about 7-10x faster than "pure C#" edition of ALGLIB
2193  ! Difference in performance will be more striking on newer CPU's with
2194  ! support for newer SIMD instructions.
2195  !
2196  ! Commercial edition of ALGLIB also supports multithreaded acceleration
2197  ! of this function. Because starting/stopping worker thread always
2198  ! involves some overhead, parallelism starts to be profitable for N's
2199  ! larger than 128.
2200  !
2201  ! In order to use multicore features you have to:
2202  ! * use commercial version of ALGLIB
2203  ! * call this function with "smp_" prefix, which indicates that
2204  ! multicore code will be used (for multicore support)
2205  !
2206  ! We recommend you to read 'Working with commercial version' section of
2207  ! ALGLIB Reference Manual in order to find out how to use performance-
2208  ! related features provided by commercial edition of ALGLIB.
2209 
2210 INPUT PARAMETERS
2211  N - matrix size, N>=0
2212  M - matrix size, M>=0
2213  A - matrix, actual matrix is stored in A[I1:I1+M-1,J1:J1+M-1]
2214  I1 - submatrix offset
2215  J1 - submatrix offset
2216  IsUpper - whether matrix is upper triangular
2217  IsUnit - whether matrix is unitriangular
2218  OpType - transformation type:
2219  * 0 - no transformation
2220  * 1 - transposition
2221  * 2 - conjugate transposition
2222  X - matrix, actual matrix is stored in X[I2:I2+M-1,J2:J2+N-1]
2223  I2 - submatrix offset
2224  J2 - submatrix offset
2225 
2226  -- ALGLIB routine --
2227  15.12.2009
2228  Bochkanov Sergey
2229 *************************************************************************/
2230 void cmatrixlefttrsm(const ae_int_t m, const ae_int_t n, const complex_2d_array &a, const ae_int_t i1, const ae_int_t j1, const bool isupper, const bool isunit, const ae_int_t optype, const complex_2d_array &x, const ae_int_t i2, const ae_int_t j2);
2231 void smp_cmatrixlefttrsm(const ae_int_t m, const ae_int_t n, const complex_2d_array &a, const ae_int_t i1, const ae_int_t j1, const bool isupper, const bool isunit, const ae_int_t optype, const complex_2d_array &x, const ae_int_t i2, const ae_int_t j2);
2232 
2233 
2234 /*************************************************************************
2235 This subroutine calculates X*op(A^-1) where:
2236 * X is MxN general matrix
2237 * A is NxN upper/lower triangular/unitriangular matrix
2238 * "op" may be identity transformation, transposition
2239 
2240 Multiplication result replaces X.
2241 Cache-oblivious algorithm is used.
2242 
2243 COMMERCIAL EDITION OF ALGLIB:
2244 
2245  ! Commercial version of ALGLIB includes two important improvements of
2246  ! this function, which can be used from C++ and C#:
2247  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
2248  ! * multicore support
2249  !
2250  ! Intel MKL gives approximately constant (with respect to number of
2251  ! worker threads) acceleration factor which depends on CPU being used,
2252  ! problem size and "baseline" ALGLIB edition which is used for
2253  ! comparison.
2254  !
2255  ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be:
2256  ! * about 2-3x faster than ALGLIB for C++ without MKL
2257  ! * about 7-10x faster than "pure C#" edition of ALGLIB
2258  ! Difference in performance will be more striking on newer CPU's with
2259  ! support for newer SIMD instructions.
2260  !
2261  ! Commercial edition of ALGLIB also supports multithreaded acceleration
2262  ! of this function. Because starting/stopping worker thread always
2263  ! involves some overhead, parallelism starts to be profitable for N's
2264  ! larger than 128.
2265  !
2266  ! In order to use multicore features you have to:
2267  ! * use commercial version of ALGLIB
2268  ! * call this function with "smp_" prefix, which indicates that
2269  ! multicore code will be used (for multicore support)
2270  !
2271  ! We recommend you to read 'Working with commercial version' section of
2272  ! ALGLIB Reference Manual in order to find out how to use performance-
2273  ! related features provided by commercial edition of ALGLIB.
2274 
2275 INPUT PARAMETERS
2276  N - matrix size, N>=0
2277  M - matrix size, M>=0
2278  A - matrix, actual matrix is stored in A[I1:I1+N-1,J1:J1+N-1]
2279  I1 - submatrix offset
2280  J1 - submatrix offset
2281  IsUpper - whether matrix is upper triangular
2282  IsUnit - whether matrix is unitriangular
2283  OpType - transformation type:
2284  * 0 - no transformation
2285  * 1 - transposition
2286  X - matrix, actual matrix is stored in X[I2:I2+M-1,J2:J2+N-1]
2287  I2 - submatrix offset
2288  J2 - submatrix offset
2289 
2290  -- ALGLIB routine --
2291  15.12.2009
2292  Bochkanov Sergey
2293 *************************************************************************/
2294 void rmatrixrighttrsm(const ae_int_t m, const ae_int_t n, const real_2d_array &a, const ae_int_t i1, const ae_int_t j1, const bool isupper, const bool isunit, const ae_int_t optype, const real_2d_array &x, const ae_int_t i2, const ae_int_t j2);
2295 void smp_rmatrixrighttrsm(const ae_int_t m, const ae_int_t n, const real_2d_array &a, const ae_int_t i1, const ae_int_t j1, const bool isupper, const bool isunit, const ae_int_t optype, const real_2d_array &x, const ae_int_t i2, const ae_int_t j2);
2296 
2297 
2298 /*************************************************************************
2299 This subroutine calculates op(A^-1)*X where:
2300 * X is MxN general matrix
2301 * A is MxM upper/lower triangular/unitriangular matrix
2302 * "op" may be identity transformation, transposition
2303 
2304 Multiplication result replaces X.
2305 Cache-oblivious algorithm is used.
2306 
2307 COMMERCIAL EDITION OF ALGLIB:
2308 
2309  ! Commercial version of ALGLIB includes two important improvements of
2310  ! this function, which can be used from C++ and C#:
2311  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
2312  ! * multicore support
2313  !
2314  ! Intel MKL gives approximately constant (with respect to number of
2315  ! worker threads) acceleration factor which depends on CPU being used,
2316  ! problem size and "baseline" ALGLIB edition which is used for
2317  ! comparison.
2318  !
2319  ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be:
2320  ! * about 2-3x faster than ALGLIB for C++ without MKL
2321  ! * about 7-10x faster than "pure C#" edition of ALGLIB
2322  ! Difference in performance will be more striking on newer CPU's with
2323  ! support for newer SIMD instructions.
2324  !
2325  ! Commercial edition of ALGLIB also supports multithreaded acceleration
2326  ! of this function. Because starting/stopping worker thread always
2327  ! involves some overhead, parallelism starts to be profitable for N's
2328  ! larger than 128.
2329  !
2330  ! In order to use multicore features you have to:
2331  ! * use commercial version of ALGLIB
2332  ! * call this function with "smp_" prefix, which indicates that
2333  ! multicore code will be used (for multicore support)
2334  !
2335  ! We recommend you to read 'Working with commercial version' section of
2336  ! ALGLIB Reference Manual in order to find out how to use performance-
2337  ! related features provided by commercial edition of ALGLIB.
2338 
2339 INPUT PARAMETERS
2340  N - matrix size, N>=0
2341  M - matrix size, M>=0
2342  A - matrix, actual matrix is stored in A[I1:I1+M-1,J1:J1+M-1]
2343  I1 - submatrix offset
2344  J1 - submatrix offset
2345  IsUpper - whether matrix is upper triangular
2346  IsUnit - whether matrix is unitriangular
2347  OpType - transformation type:
2348  * 0 - no transformation
2349  * 1 - transposition
2350  X - matrix, actual matrix is stored in X[I2:I2+M-1,J2:J2+N-1]
2351  I2 - submatrix offset
2352  J2 - submatrix offset
2353 
2354  -- ALGLIB routine --
2355  15.12.2009
2356  Bochkanov Sergey
2357 *************************************************************************/
2358 void rmatrixlefttrsm(const ae_int_t m, const ae_int_t n, const real_2d_array &a, const ae_int_t i1, const ae_int_t j1, const bool isupper, const bool isunit, const ae_int_t optype, const real_2d_array &x, const ae_int_t i2, const ae_int_t j2);
2359 void smp_rmatrixlefttrsm(const ae_int_t m, const ae_int_t n, const real_2d_array &a, const ae_int_t i1, const ae_int_t j1, const bool isupper, const bool isunit, const ae_int_t optype, const real_2d_array &x, const ae_int_t i2, const ae_int_t j2);
2360 
2361 
2362 /*************************************************************************
2363 This subroutine calculates C=alpha*A*A^H+beta*C or C=alpha*A^H*A+beta*C
2364 where:
2365 * C is NxN Hermitian matrix given by its upper/lower triangle
2366 * A is NxK matrix when A*A^H is calculated, KxN matrix otherwise
2367 
2368 Additional info:
2369 * cache-oblivious algorithm is used.
2370 * multiplication result replaces C. If Beta=0, C elements are not used in
2371  calculations (not multiplied by zero - just not referenced)
2372 * if Alpha=0, A is not used (not multiplied by zero - just not referenced)
2373 * if both Beta and Alpha are zero, C is filled by zeros.
2374 
2375 COMMERCIAL EDITION OF ALGLIB:
2376 
2377  ! Commercial version of ALGLIB includes two important improvements of
2378  ! this function, which can be used from C++ and C#:
2379  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
2380  ! * multicore support
2381  !
2382  ! Intel MKL gives approximately constant (with respect to number of
2383  ! worker threads) acceleration factor which depends on CPU being used,
2384  ! problem size and "baseline" ALGLIB edition which is used for
2385  ! comparison.
2386  !
2387  ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be:
2388  ! * about 2-3x faster than ALGLIB for C++ without MKL
2389  ! * about 7-10x faster than "pure C#" edition of ALGLIB
2390  ! Difference in performance will be more striking on newer CPU's with
2391  ! support for newer SIMD instructions.
2392  !
2393  ! Commercial edition of ALGLIB also supports multithreaded acceleration
2394  ! of this function. Because starting/stopping worker thread always
2395  ! involves some overhead, parallelism starts to be profitable for N's
2396  ! larger than 128.
2397  !
2398  ! In order to use multicore features you have to:
2399  ! * use commercial version of ALGLIB
2400  ! * call this function with "smp_" prefix, which indicates that
2401  ! multicore code will be used (for multicore support)
2402  !
2403  ! We recommend you to read 'Working with commercial version' section of
2404  ! ALGLIB Reference Manual in order to find out how to use performance-
2405  ! related features provided by commercial edition of ALGLIB.
2406 
2407 INPUT PARAMETERS
2408  N - matrix size, N>=0
2409  K - matrix size, K>=0
2410  Alpha - coefficient
2411  A - matrix
2412  IA - submatrix offset (row index)
2413  JA - submatrix offset (column index)
2414  OpTypeA - multiplication type:
2415  * 0 - A*A^H is calculated
2416  * 2 - A^H*A is calculated
2417  Beta - coefficient
2418  C - preallocated input/output matrix
2419  IC - submatrix offset (row index)
2420  JC - submatrix offset (column index)
2421  IsUpper - whether upper or lower triangle of C is updated;
2422  this function updates only one half of C, leaving
2423  other half unchanged (not referenced at all).
2424 
2425  -- ALGLIB routine --
2426  16.12.2009
2427  Bochkanov Sergey
2428 *************************************************************************/
2429 void cmatrixherk(const ae_int_t n, const ae_int_t k, const double alpha, const complex_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const double beta, const complex_2d_array &c, const ae_int_t ic, const ae_int_t jc, const bool isupper);
2430 void smp_cmatrixherk(const ae_int_t n, const ae_int_t k, const double alpha, const complex_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const double beta, const complex_2d_array &c, const ae_int_t ic, const ae_int_t jc, const bool isupper);
2431 
2432 
2433 /*************************************************************************
2434 This subroutine calculates C=alpha*A*A^T+beta*C or C=alpha*A^T*A+beta*C
2435 where:
2436 * C is NxN symmetric matrix given by its upper/lower triangle
2437 * A is NxK matrix when A*A^T is calculated, KxN matrix otherwise
2438 
2439 Additional info:
2440 * cache-oblivious algorithm is used.
2441 * multiplication result replaces C. If Beta=0, C elements are not used in
2442  calculations (not multiplied by zero - just not referenced)
2443 * if Alpha=0, A is not used (not multiplied by zero - just not referenced)
2444 * if both Beta and Alpha are zero, C is filled by zeros.
2445 
2446 COMMERCIAL EDITION OF ALGLIB:
2447 
2448  ! Commercial version of ALGLIB includes two important improvements of
2449  ! this function, which can be used from C++ and C#:
2450  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
2451  ! * multicore support
2452  !
2453  ! Intel MKL gives approximately constant (with respect to number of
2454  ! worker threads) acceleration factor which depends on CPU being used,
2455  ! problem size and "baseline" ALGLIB edition which is used for
2456  ! comparison.
2457  !
2458  ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be:
2459  ! * about 2-3x faster than ALGLIB for C++ without MKL
2460  ! * about 7-10x faster than "pure C#" edition of ALGLIB
2461  ! Difference in performance will be more striking on newer CPU's with
2462  ! support for newer SIMD instructions.
2463  !
2464  ! Commercial edition of ALGLIB also supports multithreaded acceleration
2465  ! of this function. Because starting/stopping worker thread always
2466  ! involves some overhead, parallelism starts to be profitable for N's
2467  ! larger than 128.
2468  !
2469  ! In order to use multicore features you have to:
2470  ! * use commercial version of ALGLIB
2471  ! * call this function with "smp_" prefix, which indicates that
2472  ! multicore code will be used (for multicore support)
2473  !
2474  ! We recommend you to read 'Working with commercial version' section of
2475  ! ALGLIB Reference Manual in order to find out how to use performance-
2476  ! related features provided by commercial edition of ALGLIB.
2477 
2478 INPUT PARAMETERS
2479  N - matrix size, N>=0
2480  K - matrix size, K>=0
2481  Alpha - coefficient
2482  A - matrix
2483  IA - submatrix offset (row index)
2484  JA - submatrix offset (column index)
2485  OpTypeA - multiplication type:
2486  * 0 - A*A^T is calculated
2487  * 2 - A^T*A is calculated
2488  Beta - coefficient
2489  C - preallocated input/output matrix
2490  IC - submatrix offset (row index)
2491  JC - submatrix offset (column index)
2492  IsUpper - whether C is upper triangular or lower triangular
2493 
2494  -- ALGLIB routine --
2495  16.12.2009
2496  Bochkanov Sergey
2497 *************************************************************************/
2498 void rmatrixsyrk(const ae_int_t n, const ae_int_t k, const double alpha, const real_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const double beta, const real_2d_array &c, const ae_int_t ic, const ae_int_t jc, const bool isupper);
2499 void smp_rmatrixsyrk(const ae_int_t n, const ae_int_t k, const double alpha, const real_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const double beta, const real_2d_array &c, const ae_int_t ic, const ae_int_t jc, const bool isupper);
2500 
2501 
2502 /*************************************************************************
2503 This subroutine calculates C = alpha*op1(A)*op2(B) +beta*C where:
2504 * C is MxN general matrix
2505 * op1(A) is MxK matrix
2506 * op2(B) is KxN matrix
2507 * "op" may be identity transformation, transposition, conjugate transposition
2508 
2509 Additional info:
2510 * cache-oblivious algorithm is used.
2511 * multiplication result replaces C. If Beta=0, C elements are not used in
2512  calculations (not multiplied by zero - just not referenced)
2513 * if Alpha=0, A is not used (not multiplied by zero - just not referenced)
2514 * if both Beta and Alpha are zero, C is filled by zeros.
2515 
2516 COMMERCIAL EDITION OF ALGLIB:
2517 
2518  ! Commercial version of ALGLIB includes two important improvements of
2519  ! this function, which can be used from C++ and C#:
2520  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
2521  ! * multicore support
2522  !
2523  ! Intel MKL gives approximately constant (with respect to number of
2524  ! worker threads) acceleration factor which depends on CPU being used,
2525  ! problem size and "baseline" ALGLIB edition which is used for
2526  ! comparison.
2527  !
2528  ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be:
2529  ! * about 2-3x faster than ALGLIB for C++ without MKL
2530  ! * about 7-10x faster than "pure C#" edition of ALGLIB
2531  ! Difference in performance will be more striking on newer CPU's with
2532  ! support for newer SIMD instructions.
2533  !
2534  ! Commercial edition of ALGLIB also supports multithreaded acceleration
2535  ! of this function. Because starting/stopping worker thread always
2536  ! involves some overhead, parallelism starts to be profitable for N's
2537  ! larger than 128.
2538  !
2539  ! In order to use multicore features you have to:
2540  ! * use commercial version of ALGLIB
2541  ! * call this function with "smp_" prefix, which indicates that
2542  ! multicore code will be used (for multicore support)
2543  !
2544  ! We recommend you to read 'Working with commercial version' section of
2545  ! ALGLIB Reference Manual in order to find out how to use performance-
2546  ! related features provided by commercial edition of ALGLIB.
2547 
2548 IMPORTANT:
2549 
2550 This function does NOT preallocate output matrix C, it MUST be preallocated
2551 by caller prior to calling this function. In case C does not have enough
2552 space to store result, exception will be generated.
2553 
2554 INPUT PARAMETERS
2555  M - matrix size, M>0
2556  N - matrix size, N>0
2557  K - matrix size, K>0
2558  Alpha - coefficient
2559  A - matrix
2560  IA - submatrix offset
2561  JA - submatrix offset
2562  OpTypeA - transformation type:
2563  * 0 - no transformation
2564  * 1 - transposition
2565  * 2 - conjugate transposition
2566  B - matrix
2567  IB - submatrix offset
2568  JB - submatrix offset
2569  OpTypeB - transformation type:
2570  * 0 - no transformation
2571  * 1 - transposition
2572  * 2 - conjugate transposition
2573  Beta - coefficient
2574  C - matrix (PREALLOCATED, large enough to store result)
2575  IC - submatrix offset
2576  JC - submatrix offset
2577 
2578  -- ALGLIB routine --
2579  16.12.2009
2580  Bochkanov Sergey
2581 *************************************************************************/
2582 void cmatrixgemm(const ae_int_t m, const ae_int_t n, const ae_int_t k, const alglib::complex alpha, const complex_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const complex_2d_array &b, const ae_int_t ib, const ae_int_t jb, const ae_int_t optypeb, const alglib::complex beta, const complex_2d_array &c, const ae_int_t ic, const ae_int_t jc);
2583 void smp_cmatrixgemm(const ae_int_t m, const ae_int_t n, const ae_int_t k, const alglib::complex alpha, const complex_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const complex_2d_array &b, const ae_int_t ib, const ae_int_t jb, const ae_int_t optypeb, const alglib::complex beta, const complex_2d_array &c, const ae_int_t ic, const ae_int_t jc);
2584 
2585 
2586 /*************************************************************************
2587 This subroutine calculates C = alpha*op1(A)*op2(B) +beta*C where:
2588 * C is MxN general matrix
2589 * op1(A) is MxK matrix
2590 * op2(B) is KxN matrix
2591 * "op" may be identity transformation, transposition
2592 
2593 Additional info:
2594 * cache-oblivious algorithm is used.
2595 * multiplication result replaces C. If Beta=0, C elements are not used in
2596  calculations (not multiplied by zero - just not referenced)
2597 * if Alpha=0, A is not used (not multiplied by zero - just not referenced)
2598 * if both Beta and Alpha are zero, C is filled by zeros.
2599 
2600 COMMERCIAL EDITION OF ALGLIB:
2601 
2602  ! Commercial version of ALGLIB includes two important improvements of
2603  ! this function, which can be used from C++ and C#:
2604  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
2605  ! * multicore support
2606  !
2607  ! Intel MKL gives approximately constant (with respect to number of
2608  ! worker threads) acceleration factor which depends on CPU being used,
2609  ! problem size and "baseline" ALGLIB edition which is used for
2610  ! comparison.
2611  !
2612  ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be:
2613  ! * about 2-3x faster than ALGLIB for C++ without MKL
2614  ! * about 7-10x faster than "pure C#" edition of ALGLIB
2615  ! Difference in performance will be more striking on newer CPU's with
2616  ! support for newer SIMD instructions.
2617  !
2618  ! Commercial edition of ALGLIB also supports multithreaded acceleration
2619  ! of this function. Because starting/stopping worker thread always
2620  ! involves some overhead, parallelism starts to be profitable for N's
2621  ! larger than 128.
2622  !
2623  ! In order to use multicore features you have to:
2624  ! * use commercial version of ALGLIB
2625  ! * call this function with "smp_" prefix, which indicates that
2626  ! multicore code will be used (for multicore support)
2627  !
2628  ! We recommend you to read 'Working with commercial version' section of
2629  ! ALGLIB Reference Manual in order to find out how to use performance-
2630  ! related features provided by commercial edition of ALGLIB.
2631 
2632 IMPORTANT:
2633 
2634 This function does NOT preallocate output matrix C, it MUST be preallocated
2635 by caller prior to calling this function. In case C does not have enough
2636 space to store result, exception will be generated.
2637 
2638 INPUT PARAMETERS
2639  M - matrix size, M>0
2640  N - matrix size, N>0
2641  K - matrix size, K>0
2642  Alpha - coefficient
2643  A - matrix
2644  IA - submatrix offset
2645  JA - submatrix offset
2646  OpTypeA - transformation type:
2647  * 0 - no transformation
2648  * 1 - transposition
2649  B - matrix
2650  IB - submatrix offset
2651  JB - submatrix offset
2652  OpTypeB - transformation type:
2653  * 0 - no transformation
2654  * 1 - transposition
2655  Beta - coefficient
2656  C - PREALLOCATED output matrix, large enough to store result
2657  IC - submatrix offset
2658  JC - submatrix offset
2659 
2660  -- ALGLIB routine --
2661  2009-2013
2662  Bochkanov Sergey
2663 *************************************************************************/
2664 void rmatrixgemm(const ae_int_t m, const ae_int_t n, const ae_int_t k, const double alpha, const real_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const real_2d_array &b, const ae_int_t ib, const ae_int_t jb, const ae_int_t optypeb, const double beta, const real_2d_array &c, const ae_int_t ic, const ae_int_t jc);
2665 void smp_rmatrixgemm(const ae_int_t m, const ae_int_t n, const ae_int_t k, const double alpha, const real_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const real_2d_array &b, const ae_int_t ib, const ae_int_t jb, const ae_int_t optypeb, const double beta, const real_2d_array &c, const ae_int_t ic, const ae_int_t jc);
2666 
2667 
2668 /*************************************************************************
2669 This subroutine is an older version of CMatrixHERK(), one with wrong name
2670 (it is HErmitian update, not SYmmetric). It is left here for backward
2671 compatibility.
2672 
2673  -- ALGLIB routine --
2674  16.12.2009
2675  Bochkanov Sergey
2676 *************************************************************************/
2677 void cmatrixsyrk(const ae_int_t n, const ae_int_t k, const double alpha, const complex_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const double beta, const complex_2d_array &c, const ae_int_t ic, const ae_int_t jc, const bool isupper);
2678 void smp_cmatrixsyrk(const ae_int_t n, const ae_int_t k, const double alpha, const complex_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const double beta, const complex_2d_array &c, const ae_int_t ic, const ae_int_t jc, const bool isupper);
2679 
2680 /*************************************************************************
2681 LU decomposition of a general real matrix with row pivoting
2682 
2683 A is represented as A = P*L*U, where:
2684 * L is lower unitriangular matrix
2685 * U is upper triangular matrix
2686 * P = P0*P1*...*PK, K=min(M,N)-1,
2687  Pi - permutation matrix for I and Pivots[I]
2688 
2689 This is cache-oblivious implementation of LU decomposition.
2690 It is optimized for square matrices. As for rectangular matrices:
2691 * best case - M>>N
2692 * worst case - N>>M, small M, large N, matrix does not fit in CPU cache
2693 
2694 COMMERCIAL EDITION OF ALGLIB:
2695 
2696  ! Commercial version of ALGLIB includes two important improvements of
2697  ! this function, which can be used from C++ and C#:
2698  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
2699  ! * multicore support
2700  !
2701  ! Intel MKL gives approximately constant (with respect to number of
2702  ! worker threads) acceleration factor which depends on CPU being used,
2703  ! problem size and "baseline" ALGLIB edition which is used for
2704  ! comparison.
2705  !
2706  ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be:
2707  ! * about 2-3x faster than ALGLIB for C++ without MKL
2708  ! * about 7-10x faster than "pure C#" edition of ALGLIB
2709  ! Difference in performance will be more striking on newer CPU's with
2710  ! support for newer SIMD instructions. Generally, MKL accelerates any
2711  ! problem whose size is at least 128, with best efficiency achieved for
2712  ! N's larger than 512.
2713  !
2714  ! Commercial edition of ALGLIB also supports multithreaded acceleration
2715  ! of this function. We should note that LU decomposition is harder to
2716  ! parallelize than, say, matrix-matrix product - this algorithm has
2717  ! many internal synchronization points which can not be avoided. However
2718  ! parallelism starts to be profitable starting from N=1024, achieving
2719  ! near-linear speedup for N=4096 or higher.
2720  !
2721  ! In order to use multicore features you have to:
2722  ! * use commercial version of ALGLIB
2723  ! * call this function with "smp_" prefix, which indicates that
2724  ! multicore code will be used (for multicore support)
2725  !
2726  ! We recommend you to read 'Working with commercial version' section of
2727  ! ALGLIB Reference Manual in order to find out how to use performance-
2728  ! related features provided by commercial edition of ALGLIB.
2729 
2730 INPUT PARAMETERS:
2731  A - array[0..M-1, 0..N-1].
2732  M - number of rows in matrix A.
2733  N - number of columns in matrix A.
2734 
2735 
2736 OUTPUT PARAMETERS:
2737  A - matrices L and U in compact form:
2738  * L is stored under main diagonal
2739  * U is stored on and above main diagonal
2740  Pivots - permutation matrix in compact form.
2741  array[0..Min(M-1,N-1)].
2742 
2743  -- ALGLIB routine --
2744  10.01.2010
2745  Bochkanov Sergey
2746 *************************************************************************/
2747 void rmatrixlu(real_2d_array &a, const ae_int_t m, const ae_int_t n, integer_1d_array &pivots);
2748 void smp_rmatrixlu(real_2d_array &a, const ae_int_t m, const ae_int_t n, integer_1d_array &pivots);
2749 
2750 
2751 /*************************************************************************
2752 LU decomposition of a general complex matrix with row pivoting
2753 
2754 A is represented as A = P*L*U, where:
2755 * L is lower unitriangular matrix
2756 * U is upper triangular matrix
2757 * P = P0*P1*...*PK, K=min(M,N)-1,
2758  Pi - permutation matrix for I and Pivots[I]
2759 
2760 This is cache-oblivious implementation of LU decomposition. It is optimized
2761 for square matrices. As for rectangular matrices:
2762 * best case - M>>N
2763 * worst case - N>>M, small M, large N, matrix does not fit in CPU cache
2764 
2765 COMMERCIAL EDITION OF ALGLIB:
2766 
2767  ! Commercial version of ALGLIB includes two important improvements of
2768  ! this function, which can be used from C++ and C#:
2769  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
2770  ! * multicore support
2771  !
2772  ! Intel MKL gives approximately constant (with respect to number of
2773  ! worker threads) acceleration factor which depends on CPU being used,
2774  ! problem size and "baseline" ALGLIB edition which is used for
2775  ! comparison.
2776  !
2777  ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be:
2778  ! * about 2-3x faster than ALGLIB for C++ without MKL
2779  ! * about 7-10x faster than "pure C#" edition of ALGLIB
2780  ! Difference in performance will be more striking on newer CPU's with
2781  ! support for newer SIMD instructions. Generally, MKL accelerates any
2782  ! problem whose size is at least 128, with best efficiency achieved for
2783  ! N's larger than 512.
2784  !
2785  ! Commercial edition of ALGLIB also supports multithreaded acceleration
2786  ! of this function. We should note that LU decomposition is harder to
2787  ! parallelize than, say, matrix-matrix product - this algorithm has
2788  ! many internal synchronization points which can not be avoided. However
2789  ! parallelism starts to be profitable starting from N=1024, achieving
2790  ! near-linear speedup for N=4096 or higher.
2791  !
2792  ! In order to use multicore features you have to:
2793  ! * use commercial version of ALGLIB
2794  ! * call this function with "smp_" prefix, which indicates that
2795  ! multicore code will be used (for multicore support)
2796  !
2797  ! We recommend you to read 'Working with commercial version' section of
2798  ! ALGLIB Reference Manual in order to find out how to use performance-
2799  ! related features provided by commercial edition of ALGLIB.
2800 
2801 INPUT PARAMETERS:
2802  A - array[0..M-1, 0..N-1].
2803  M - number of rows in matrix A.
2804  N - number of columns in matrix A.
2805 
2806 
2807 OUTPUT PARAMETERS:
2808  A - matrices L and U in compact form:
2809  * L is stored under main diagonal
2810  * U is stored on and above main diagonal
2811  Pivots - permutation matrix in compact form.
2812  array[0..Min(M-1,N-1)].
2813 
2814  -- ALGLIB routine --
2815  10.01.2010
2816  Bochkanov Sergey
2817 *************************************************************************/
2818 void cmatrixlu(complex_2d_array &a, const ae_int_t m, const ae_int_t n, integer_1d_array &pivots);
2819 void smp_cmatrixlu(complex_2d_array &a, const ae_int_t m, const ae_int_t n, integer_1d_array &pivots);
2820 
2821 
2822 /*************************************************************************
2823 Cache-oblivious Cholesky decomposition
2824 
2825 The algorithm computes Cholesky decomposition of a Hermitian positive-
2826 definite matrix. The result of an algorithm is a representation of A as
2827 A=U'*U or A=L*L' (here X' denotes conj(X^T)).
2828 
2829 COMMERCIAL EDITION OF ALGLIB:
2830 
2831  ! Commercial version of ALGLIB includes two important improvements of
2832  ! this function, which can be used from C++ and C#:
2833  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
2834  ! * multicore support
2835  !
2836  ! Intel MKL gives approximately constant (with respect to number of
2837  ! worker threads) acceleration factor which depends on CPU being used,
2838  ! problem size and "baseline" ALGLIB edition which is used for
2839  ! comparison.
2840  !
2841  ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be:
2842  ! * about 2-3x faster than ALGLIB for C++ without MKL
2843  ! * about 7-10x faster than "pure C#" edition of ALGLIB
2844  ! Difference in performance will be more striking on newer CPU's with
2845  ! support for newer SIMD instructions. Generally, MKL accelerates any
2846  ! problem whose size is at least 128, with best efficiency achieved for
2847  ! N's larger than 512.
2848  !
2849  ! Commercial edition of ALGLIB also supports multithreaded acceleration
2850  ! of this function. We should note that Cholesky decomposition is harder
2851  ! to parallelize than, say, matrix-matrix product - this algorithm has
2852  ! several synchronization points which can not be avoided. However,
2853  ! parallelism starts to be profitable starting from N=500.
2854  !
2855  ! In order to use multicore features you have to:
2856  ! * use commercial version of ALGLIB
2857  ! * call this function with "smp_" prefix, which indicates that
2858  ! multicore code will be used (for multicore support)
2859  !
2860  ! We recommend you to read 'Working with commercial version' section of
2861  ! ALGLIB Reference Manual in order to find out how to use performance-
2862  ! related features provided by commercial edition of ALGLIB.
2863 
2864 INPUT PARAMETERS:
2865  A - upper or lower triangle of a factorized matrix.
2866  array with elements [0..N-1, 0..N-1].
2867  N - size of matrix A.
2868  IsUpper - if IsUpper=True, then A contains an upper triangle of
2869  a symmetric matrix, otherwise A contains a lower one.
2870 
2871 OUTPUT PARAMETERS:
2872  A - the result of factorization. If IsUpper=True, then
2873  the upper triangle contains matrix U, so that A = U'*U,
2874  and the elements below the main diagonal are not modified.
2875  Similarly, if IsUpper = False.
2876 
2877 RESULT:
2878  If the matrix is positive-definite, the function returns True.
2879  Otherwise, the function returns False. Contents of A is not determined
2880  in such case.
2881 
2882  -- ALGLIB routine --
2883  15.12.2009
2884  Bochkanov Sergey
2885 *************************************************************************/
2886 bool hpdmatrixcholesky(complex_2d_array &a, const ae_int_t n, const bool isupper);
2887 bool smp_hpdmatrixcholesky(complex_2d_array &a, const ae_int_t n, const bool isupper);
2888 
2889 
2890 /*************************************************************************
2891 Cache-oblivious Cholesky decomposition
2892 
2893 The algorithm computes Cholesky decomposition of a symmetric positive-
2894 definite matrix. The result of an algorithm is a representation of A as
2895 A=U^T*U or A=L*L^T
2896 
2897 COMMERCIAL EDITION OF ALGLIB:
2898 
2899  ! Commercial version of ALGLIB includes two important improvements of
2900  ! this function, which can be used from C++ and C#:
2901  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
2902  ! * multicore support
2903  !
2904  ! Intel MKL gives approximately constant (with respect to number of
2905  ! worker threads) acceleration factor which depends on CPU being used,
2906  ! problem size and "baseline" ALGLIB edition which is used for
2907  ! comparison.
2908  !
2909  ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be:
2910  ! * about 2-3x faster than ALGLIB for C++ without MKL
2911  ! * about 7-10x faster than "pure C#" edition of ALGLIB
2912  ! Difference in performance will be more striking on newer CPU's with
2913  ! support for newer SIMD instructions. Generally, MKL accelerates any
2914  ! problem whose size is at least 128, with best efficiency achieved for
2915  ! N's larger than 512.
2916  !
2917  ! Commercial edition of ALGLIB also supports multithreaded acceleration
2918  ! of this function. We should note that Cholesky decomposition is harder
2919  ! to parallelize than, say, matrix-matrix product - this algorithm has
2920  ! several synchronization points which can not be avoided. However,
2921  ! parallelism starts to be profitable starting from N=500.
2922  !
2923  ! In order to use multicore features you have to:
2924  ! * use commercial version of ALGLIB
2925  ! * call this function with "smp_" prefix, which indicates that
2926  ! multicore code will be used (for multicore support)
2927  !
2928  ! We recommend you to read 'Working with commercial version' section of
2929  ! ALGLIB Reference Manual in order to find out how to use performance-
2930  ! related features provided by commercial edition of ALGLIB.
2931 
2932 INPUT PARAMETERS:
2933  A - upper or lower triangle of a factorized matrix.
2934  array with elements [0..N-1, 0..N-1].
2935  N - size of matrix A.
2936  IsUpper - if IsUpper=True, then A contains an upper triangle of
2937  a symmetric matrix, otherwise A contains a lower one.
2938 
2939 OUTPUT PARAMETERS:
2940  A - the result of factorization. If IsUpper=True, then
2941  the upper triangle contains matrix U, so that A = U^T*U,
2942  and the elements below the main diagonal are not modified.
2943  Similarly, if IsUpper = False.
2944 
2945 RESULT:
2946  If the matrix is positive-definite, the function returns True.
2947  Otherwise, the function returns False. Contents of A is not determined
2948  in such case.
2949 
2950  -- ALGLIB routine --
2951  15.12.2009
2952  Bochkanov Sergey
2953 *************************************************************************/
2954 bool spdmatrixcholesky(real_2d_array &a, const ae_int_t n, const bool isupper);
2955 bool smp_spdmatrixcholesky(real_2d_array &a, const ae_int_t n, const bool isupper);
2956 
2957 
2958 /*************************************************************************
2959 Update of Cholesky decomposition: rank-1 update to original A.
2962 
2963 This function uses internally allocated buffer which is not saved between
2964 subsequent calls. So, if you perform a lot of subsequent updates,
2965 we recommend you to use "buffered" version of this function:
2966 SPDMatrixCholeskyUpdateAdd1Buf().
2967 
2968 INPUT PARAMETERS:
2969  A - upper or lower Cholesky factor.
2970  array with elements [0..N-1, 0..N-1].
2971  Exception is thrown if array size is too small.
2972  N - size of matrix A, N>0
2973  IsUpper - if IsUpper=True, then A contains upper Cholesky factor;
2974  otherwise A contains a lower one.
2975  U - array[N], rank-1 update to A: A_mod = A + u*u'
2976  Exception is thrown if array size is too small.
2977  NOTE: this non-buffered version takes no BufR argument - its working
2978  buffer is allocated internally on each call. Use
2979  SPDMatrixCholeskyUpdateAdd1Buf() to supply a reusable buffer.
2980 
2981 OUTPUT PARAMETERS:
2982  A - updated factorization. If IsUpper=True, then the upper
2983  triangle contains matrix U, and the elements below the main
2984  diagonal are not modified. Similarly, if IsUpper = False.
2985 
2986 NOTE: this function always succeeds, so it does not return completion code
2987 
2988 NOTE: this function checks sizes of input arrays, but it does NOT checks
2989  for presence of infinities or NAN's.
2990 
2991  -- ALGLIB --
2992  03.02.2014
2993  Sergey Bochkanov
2994 *************************************************************************/
2995 void spdmatrixcholeskyupdateadd1(const real_2d_array &a, const ae_int_t n, const bool isupper, const real_1d_array &u);
2996 
2997 
2998 /*************************************************************************
2999 Update of Cholesky decomposition: "fixing" some variables.
3000 
3001 This function uses internally allocated buffer which is not saved between
3002 subsequent calls. So, if you perform a lot of subsequent updates,
3003 we recommend you to use "buffered" version of this function:
3004 SPDMatrixCholeskyUpdateFixBuf().
3005 
3006 "FIXING" EXPLAINED:
3007 
3008  Suppose we have N*N positive definite matrix A. "Fixing" some variable
3009  means filling corresponding row/column of A by zeros, and setting
3010  diagonal element to 1.
3011 
3012  For example, if we fix 2nd variable in 4*4 matrix A, it becomes Af:
3013 
3014  ( A00 A01 A02 A03 ) ( Af00 0 Af02 Af03 )
3015  ( A10 A11 A12 A13 ) ( 0 1 0 0 )
3016  ( A20 A21 A22 A23 ) => ( Af20 0 Af22 Af23 )
3017  ( A30 A31 A32 A33 ) ( Af30 0 Af32 Af33 )
3018 
3019  If we have Cholesky decomposition of A, it must be recalculated after
3020  variables were fixed. However, it is possible to use efficient
3021  algorithm, which needs O(K*N^2) time to "fix" K variables, given
3022  Cholesky decomposition of original, "unfixed" A.
3023 
3024 INPUT PARAMETERS:
3025  A - upper or lower Cholesky factor.
3026  array with elements [0..N-1, 0..N-1].
3027  Exception is thrown if array size is too small.
3028  N - size of matrix A, N>0
3029  IsUpper - if IsUpper=True, then A contains upper Cholesky factor;
3030  otherwise A contains a lower one.
3031  Fix - array[N], I-th element is True if I-th variable must be
3032  fixed. Exception is thrown if array size is too small.
3033  NOTE: this non-buffered version takes no BufR argument - its working
3034  buffer is allocated internally on each call. Use
3035  SPDMatrixCholeskyUpdateFixBuf() to supply a reusable buffer.
3036 
3037 OUTPUT PARAMETERS:
3038  A - updated factorization. If IsUpper=True, then the upper
3039  triangle contains matrix U, and the elements below the main
3040  diagonal are not modified. Similarly, if IsUpper = False.
3041 
3042 NOTE: this function always succeeds, so it does not return completion code
3043 
3044 NOTE: this function checks sizes of input arrays, but it does NOT checks
3045  for presence of infinities or NAN's.
3046 
3047 NOTE: this function is efficient only for moderate amount of updated
3048  variables - say, 0.1*N or 0.3*N. For larger amount of variables it
3049  will still work, but you may get better performance with
3050  straightforward Cholesky.
3051 
3052  -- ALGLIB --
3053  03.02.2014
3054  Sergey Bochkanov
3055 *************************************************************************/
3056 void spdmatrixcholeskyupdatefix(const real_2d_array &a, const ae_int_t n, const bool isupper, const boolean_1d_array &fix);
3057 
3058 
3059 /*************************************************************************
3060 Update of Cholesky decomposition: rank-1 update to original A. "Buffered"
3061 version which uses preallocated buffer which is saved between subsequent
3062 function calls.
3063 
3064 See comments for SPDMatrixCholeskyUpdateAdd1() for more information.
3065 
3066 INPUT PARAMETERS:
3067  A - upper or lower Cholesky factor.
3068  array with elements [0..N-1, 0..N-1].
3069  Exception is thrown if array size is too small.
3070  N - size of matrix A, N>0
3071  IsUpper - if IsUpper=True, then A contains upper Cholesky factor;
3072  otherwise A contains a lower one.
3073  U - array[N], rank-1 update to A: A_mod = A + u*u'
3074  Exception is thrown if array size is too small.
3075  BufR - possibly preallocated buffer; automatically resized if
3076  needed. It is recommended to reuse this buffer if you
3077  perform a lot of subsequent decompositions.
3078 
3079 OUTPUT PARAMETERS:
3080  A - updated factorization. If IsUpper=True, then the upper
3081  triangle contains matrix U, and the elements below the main
3082  diagonal are not modified. Similarly, if IsUpper = False.
3083 
3084  -- ALGLIB --
3085  03.02.2014
3086  Sergey Bochkanov
3087 *************************************************************************/
3088 void spdmatrixcholeskyupdateadd1buf(const real_2d_array &a, const ae_int_t n, const bool isupper, const real_1d_array &u, real_1d_array &bufr);
3089 
3090 
3091 /*************************************************************************
3092 Update of Cholesky decomposition: "fixing" some variables. "Buffered"
3093 version which uses preallocated buffer which is saved between subsequent
3094 function calls.
3095 
3096 See comments for SPDMatrixCholeskyUpdateFix() for more information.
3097 
3098 INPUT PARAMETERS:
3099  A - upper or lower Cholesky factor.
3100  array with elements [0..N-1, 0..N-1].
3101  Exception is thrown if array size is too small.
3102  N - size of matrix A, N>0
3103  IsUpper - if IsUpper=True, then A contains upper Cholesky factor;
3104  otherwise A contains a lower one.
3105  Fix - array[N], I-th element is True if I-th variable must be
3106  fixed. Exception is thrown if array size is too small.
3107  BufR - possibly preallocated buffer; automatically resized if
3108  needed. It is recommended to reuse this buffer if you
3109  perform a lot of subsequent decompositions.
3110 
3111 OUTPUT PARAMETERS:
3112  A - updated factorization. If IsUpper=True, then the upper
3113  triangle contains matrix U, and the elements below the main
3114  diagonal are not modified. Similarly, if IsUpper = False.
3115 
3116  -- ALGLIB --
3117  03.02.2014
3118  Sergey Bochkanov
3119 *************************************************************************/
3120 void spdmatrixcholeskyupdatefixbuf(const real_2d_array &a, const ae_int_t n, const bool isupper, const boolean_1d_array &fix, real_1d_array &bufr);
3121 
3122 
3123 /*************************************************************************
3124 Sparse Cholesky decomposition for skyline matrix using in-place algorithm
3125 without allocating additional storage.
3126 
3127 The algorithm computes Cholesky decomposition of a symmetric positive-
3128 definite sparse matrix. The result of an algorithm is a representation of
3129 A as A=U^T*U or A=L*L^T
3130 
3131 This function is a more efficient alternative to general, but slower
3132 SparseCholeskyX(), because it does not create temporary copies of the
3133 target. It performs factorization in-place, which gives best performance
3134 on low-profile matrices. Its drawback, however, is that it can not perform
3135 profile-reducing permutation of input matrix.
3136 
3137 INPUT PARAMETERS:
3138  A - sparse matrix in skyline storage (SKS) format.
3139  N - size of matrix A (can be smaller than actual size of A)
3140  IsUpper - if IsUpper=True, then factorization is performed on upper
3141  triangle. Another triangle is ignored (it may contain some
3142  data, but it is not changed).
3143 
3144 
3145 OUTPUT PARAMETERS:
3146  A - the result of factorization, stored in SKS. If IsUpper=True,
3147  then the upper triangle contains matrix U, such that
3148  A = U^T*U. Lower triangle is not changed.
3149  Similarly, if IsUpper = False. In this case L is returned,
3150  and we have A = L*(L^T).
3151  Note that THIS function does not perform permutation of
3152  rows to reduce bandwidth.
3153 
3154 RESULT:
3155  If the matrix is positive-definite, the function returns True.
3156  Otherwise, the function returns False. Contents of A is not determined
3157  in such case.
3158 
3159 NOTE: for performance reasons this function does NOT check that input
3160  matrix includes only finite values. It is your responsibility to
3161  make sure that there are no infinite or NAN values in the matrix.
3162 
3163  -- ALGLIB routine --
3164  16.01.2014
3165  Bochkanov Sergey
3166 *************************************************************************/
3167 bool sparsecholeskyskyline(const sparsematrix &a, const ae_int_t n, const bool isupper);
3168 
3169 /*************************************************************************
3170 Estimate of a matrix condition number (1-norm)
3171 
3172 The algorithm calculates a lower bound of the condition number. In this case,
3173 the algorithm does not return a lower bound of the condition number, but an
3174 inverse number (to avoid an overflow in case of a singular matrix).
3175 
3176 Input parameters:
3177  A - matrix. Array whose indexes range within [0..N-1, 0..N-1].
3178  N - size of matrix A.
3179 
3180 Result: 1/LowerBound(cond(A))
3181 
3182 NOTE:
3183  if k(A) is very large, then matrix is assumed degenerate, k(A)=INF,
3184  0.0 is returned in such cases.
3185 *************************************************************************/
3186 double rmatrixrcond1(const real_2d_array &a, const ae_int_t n);
3187 
3188 
3189 /*************************************************************************
3190 Estimate of a matrix condition number (infinity-norm).
3191 
3192 The algorithm calculates a lower bound of the condition number. In this case,
3193 the algorithm does not return a lower bound of the condition number, but an
3194 inverse number (to avoid an overflow in case of a singular matrix).
3195 
3196 Input parameters:
3197  A - matrix. Array whose indexes range within [0..N-1, 0..N-1].
3198  N - size of matrix A.
3199 
3200 Result: 1/LowerBound(cond(A))
3201 
3202 NOTE:
3203  if k(A) is very large, then matrix is assumed degenerate, k(A)=INF,
3204  0.0 is returned in such cases.
3205 *************************************************************************/
3206 double rmatrixrcondinf(const real_2d_array &a, const ae_int_t n);
3207 
3208 
3209 /*************************************************************************
3210 Condition number estimate of a symmetric positive definite matrix.
3211 
3212 The algorithm calculates a lower bound of the condition number. In this case,
3213 the algorithm does not return a lower bound of the condition number, but an
3214 inverse number (to avoid an overflow in case of a singular matrix).
3215 
3216 It should be noted that 1-norm and inf-norm of condition numbers of symmetric
3217 matrices are equal, so the algorithm doesn't take into account the
3218 differences between these types of norms.
3219 
3220 Input parameters:
3221  A - symmetric positive definite matrix which is given by its
3222  upper or lower triangle depending on the value of
3223  IsUpper. Array with elements [0..N-1, 0..N-1].
3224  N - size of matrix A.
3225  IsUpper - storage format.
3226 
3227 Result:
3228  1/LowerBound(cond(A)), if matrix A is positive definite,
3229  -1, if matrix A is not positive definite, and its condition number
3230  could not be found by this algorithm.
3231 
3232 NOTE:
3233  if k(A) is very large, then matrix is assumed degenerate, k(A)=INF,
3234  0.0 is returned in such cases.
3235 *************************************************************************/
3236 double spdmatrixrcond(const real_2d_array &a, const ae_int_t n, const bool isupper);
3237 
3238 
3239 /*************************************************************************
3240 Triangular matrix: estimate of a condition number (1-norm)
3241 
3242 The algorithm calculates a lower bound of the condition number. In this case,
3243 the algorithm does not return a lower bound of the condition number, but an
3244 inverse number (to avoid an overflow in case of a singular matrix).
3245 
3246 Input parameters:
3247  A - matrix. Array[0..N-1, 0..N-1].
3248  N - size of A.
3249  IsUpper - True, if the matrix is upper triangular.
3250  IsUnit - True, if the matrix has a unit diagonal.
3251 
3252 Result: 1/LowerBound(cond(A))
3253 
3254 NOTE:
3255  if k(A) is very large, then matrix is assumed degenerate, k(A)=INF,
3256  0.0 is returned in such cases.
3257 *************************************************************************/
3258 double rmatrixtrrcond1(const real_2d_array &a, const ae_int_t n, const bool isupper, const bool isunit);
3259 
3260 
3261 /*************************************************************************
3262 Triangular matrix: estimate of a matrix condition number (infinity-norm).
3263 
3264 The algorithm calculates a lower bound of the condition number. In this case,
3265 the algorithm does not return a lower bound of the condition number, but an
3266 inverse number (to avoid an overflow in case of a singular matrix).
3267 
3268 Input parameters:
3269  A - matrix. Array whose indexes range within [0..N-1, 0..N-1].
3270  N - size of matrix A.
3271  IsUpper - True, if the matrix is upper triangular.
3272  IsUnit - True, if the matrix has a unit diagonal.
3273 
3274 Result: 1/LowerBound(cond(A))
3275 
3276 NOTE:
3277  if k(A) is very large, then matrix is assumed degenerate, k(A)=INF,
3278  0.0 is returned in such cases.
3279 *************************************************************************/
3280 double rmatrixtrrcondinf(const real_2d_array &a, const ae_int_t n, const bool isupper, const bool isunit);
3281 
3282 
3283 /*************************************************************************
3284 Condition number estimate of a Hermitian positive definite matrix.
3285 
3286 The algorithm calculates a lower bound of the condition number. In this case,
3287 the algorithm does not return a lower bound of the condition number, but an
3288 inverse number (to avoid an overflow in case of a singular matrix).
3289 
3290 It should be noted that 1-norm and inf-norm of condition numbers of symmetric
3291 matrices are equal, so the algorithm doesn't take into account the
3292 differences between these types of norms.
3293 
3294 Input parameters:
3295  A - Hermitian positive definite matrix which is given by its
3296  upper or lower triangle depending on the value of
3297  IsUpper. Array with elements [0..N-1, 0..N-1].
3298  N - size of matrix A.
3299  IsUpper - storage format.
3300 
3301 Result:
3302  1/LowerBound(cond(A)), if matrix A is positive definite,
3303  -1, if matrix A is not positive definite, and its condition number
3304  could not be found by this algorithm.
3305 
3306 NOTE:
3307  if k(A) is very large, then matrix is assumed degenerate, k(A)=INF,
3308  0.0 is returned in such cases.
3309 *************************************************************************/
3310 double hpdmatrixrcond(const complex_2d_array &a, const ae_int_t n, const bool isupper);
3311 
3312 
3313 /*************************************************************************
3314 Estimate of a matrix condition number (1-norm)
3315 
3316 The algorithm calculates a lower bound of the condition number. In this case,
3317 the algorithm does not return a lower bound of the condition number, but an
3318 inverse number (to avoid an overflow in case of a singular matrix).
3319 
3320 Input parameters:
3321  A - matrix. Array whose indexes range within [0..N-1, 0..N-1].
3322  N - size of matrix A.
3323 
3324 Result: 1/LowerBound(cond(A))
3325 
3326 NOTE:
3327  if k(A) is very large, then matrix is assumed degenerate, k(A)=INF,
3328  0.0 is returned in such cases.
3329 *************************************************************************/
3330 double cmatrixrcond1(const complex_2d_array &a, const ae_int_t n);
3331 
3332 
3333 /*************************************************************************
3334 Estimate of a matrix condition number (infinity-norm).
3335 
3336 The algorithm calculates a lower bound of the condition number. In this case,
3337 the algorithm does not return a lower bound of the condition number, but an
3338 inverse number (to avoid an overflow in case of a singular matrix).
3339 
3340 Input parameters:
3341  A - matrix. Array whose indexes range within [0..N-1, 0..N-1].
3342  N - size of matrix A.
3343 
3344 Result: 1/LowerBound(cond(A))
3345 
3346 NOTE:
3347  if k(A) is very large, then matrix is assumed degenerate, k(A)=INF,
3348  0.0 is returned in such cases.
3349 *************************************************************************/
3350 double cmatrixrcondinf(const complex_2d_array &a, const ae_int_t n);
3351 
3352 
3353 /*************************************************************************
3354 Estimate of the condition number of a matrix given by its LU decomposition (1-norm)
3355 
3356 The algorithm calculates a lower bound of the condition number. In this case,
3357 the algorithm does not return a lower bound of the condition number, but an
3358 inverse number (to avoid an overflow in case of a singular matrix).
3359 
3360 Input parameters:
3361  LUA - LU decomposition of a matrix in compact form. Output of
3362  the RMatrixLU subroutine.
3363  N - size of matrix A.
3364 
3365 Result: 1/LowerBound(cond(A))
3366 
3367 NOTE:
3368  if k(A) is very large, then matrix is assumed degenerate, k(A)=INF,
3369  0.0 is returned in such cases.
3370 *************************************************************************/
3371 double rmatrixlurcond1(const real_2d_array &lua, const ae_int_t n);
3372 
3373 
3374 /*************************************************************************
3375 Estimate of the condition number of a matrix given by its LU decomposition
3376 (infinity norm).
3377 
3378 The algorithm calculates a lower bound of the condition number. In this case,
3379 the algorithm does not return a lower bound of the condition number, but an
3380 inverse number (to avoid an overflow in case of a singular matrix).
3381 
3382 Input parameters:
3383  LUA - LU decomposition of a matrix in compact form. Output of
3384  the RMatrixLU subroutine.
3385  N - size of matrix A.
3386 
3387 Result: 1/LowerBound(cond(A))
3388 
3389 NOTE:
3390  if k(A) is very large, then matrix is assumed degenerate, k(A)=INF,
3391  0.0 is returned in such cases.
3392 *************************************************************************/
3393 double rmatrixlurcondinf(const real_2d_array &lua, const ae_int_t n);
3394 
3395 
3396 /*************************************************************************
3397 Condition number estimate of a symmetric positive definite matrix given by
3398 Cholesky decomposition.
3399 
3400 The algorithm calculates a lower bound of the condition number. In this
3401 case, the algorithm does not return a lower bound of the condition number,
3402 but an inverse number (to avoid an overflow in case of a singular matrix).
3403 
3404 It should be noted that 1-norm and inf-norm condition numbers of symmetric
3405 matrices are equal, so the algorithm doesn't take into account the
3406 differences between these types of norms.
3407 
3408 Input parameters:
3409  CD - Cholesky decomposition of matrix A,
3410  output of SMatrixCholesky subroutine.
 3411  N - size of matrix A.
  IsUpper - storage format.
3412 
3413 Result: 1/LowerBound(cond(A))
3414 
3415 NOTE:
3416  if k(A) is very large, then matrix is assumed degenerate, k(A)=INF,
3417  0.0 is returned in such cases.
3418 *************************************************************************/
3419 double spdmatrixcholeskyrcond(const real_2d_array &a, const ae_int_t n, const bool isupper);
3420 
3421 
3422 /*************************************************************************
3423 Condition number estimate of a Hermitian positive definite matrix given by
3424 Cholesky decomposition.
3425 
3426 The algorithm calculates a lower bound of the condition number. In this
3427 case, the algorithm does not return a lower bound of the condition number,
3428 but an inverse number (to avoid an overflow in case of a singular matrix).
3429 
3430 It should be noted that 1-norm and inf-norm condition numbers of symmetric
3431 matrices are equal, so the algorithm doesn't take into account the
3432 differences between these types of norms.
3433 
3434 Input parameters:
 3435  CD - Cholesky decomposition of matrix A,
 3436  output of HPDMatrixCholesky subroutine.
 3437  N - size of matrix A.
  IsUpper - storage format.
3438 
3439 Result: 1/LowerBound(cond(A))
3440 
3441 NOTE:
3442  if k(A) is very large, then matrix is assumed degenerate, k(A)=INF,
3443  0.0 is returned in such cases.
3444 *************************************************************************/
3445 double hpdmatrixcholeskyrcond(const complex_2d_array &a, const ae_int_t n, const bool isupper);
3446 
3447 
3448 /*************************************************************************
3449 Estimate of the condition number of a matrix given by its LU decomposition (1-norm)
3450 
3451 The algorithm calculates a lower bound of the condition number. In this case,
3452 the algorithm does not return a lower bound of the condition number, but an
3453 inverse number (to avoid an overflow in case of a singular matrix).
3454 
3455 Input parameters:
3456  LUA - LU decomposition of a matrix in compact form. Output of
3457  the CMatrixLU subroutine.
3458  N - size of matrix A.
3459 
3460 Result: 1/LowerBound(cond(A))
3461 
3462 NOTE:
3463  if k(A) is very large, then matrix is assumed degenerate, k(A)=INF,
3464  0.0 is returned in such cases.
3465 *************************************************************************/
3466 double cmatrixlurcond1(const complex_2d_array &lua, const ae_int_t n);
3467 
3468 
3469 /*************************************************************************
3470 Estimate of the condition number of a matrix given by its LU decomposition
3471 (infinity norm).
3472 
3473 The algorithm calculates a lower bound of the condition number. In this case,
3474 the algorithm does not return a lower bound of the condition number, but an
3475 inverse number (to avoid an overflow in case of a singular matrix).
3476 
3477 Input parameters:
3478  LUA - LU decomposition of a matrix in compact form. Output of
3479  the CMatrixLU subroutine.
3480  N - size of matrix A.
3481 
3482 Result: 1/LowerBound(cond(A))
3483 
3484 NOTE:
3485  if k(A) is very large, then matrix is assumed degenerate, k(A)=INF,
3486  0.0 is returned in such cases.
3487 *************************************************************************/
3488 double cmatrixlurcondinf(const complex_2d_array &lua, const ae_int_t n);
3489 
3490 
3491 /*************************************************************************
3492 Triangular matrix: estimate of a condition number (1-norm)
3493 
3494 The algorithm calculates a lower bound of the condition number. In this case,
3495 the algorithm does not return a lower bound of the condition number, but an
3496 inverse number (to avoid an overflow in case of a singular matrix).
3497 
3498 Input parameters:
3499  A - matrix. Array[0..N-1, 0..N-1].
3500  N - size of A.
3501  IsUpper - True, if the matrix is upper triangular.
3502  IsUnit - True, if the matrix has a unit diagonal.
3503 
3504 Result: 1/LowerBound(cond(A))
3505 
3506 NOTE:
3507  if k(A) is very large, then matrix is assumed degenerate, k(A)=INF,
3508  0.0 is returned in such cases.
3509 *************************************************************************/
3510 double cmatrixtrrcond1(const complex_2d_array &a, const ae_int_t n, const bool isupper, const bool isunit);
3511 
3512 
3513 /*************************************************************************
3514 Triangular matrix: estimate of a matrix condition number (infinity-norm).
3515 
3516 The algorithm calculates a lower bound of the condition number. In this case,
3517 the algorithm does not return a lower bound of the condition number, but an
3518 inverse number (to avoid an overflow in case of a singular matrix).
3519 
3520 Input parameters:
3521  A - matrix. Array whose indexes range within [0..N-1, 0..N-1].
3522  N - size of matrix A.
3523  IsUpper - True, if the matrix is upper triangular.
3524  IsUnit - True, if the matrix has a unit diagonal.
3525 
3526 Result: 1/LowerBound(cond(A))
3527 
3528 NOTE:
3529  if k(A) is very large, then matrix is assumed degenerate, k(A)=INF,
3530  0.0 is returned in such cases.
3531 *************************************************************************/
3532 double cmatrixtrrcondinf(const complex_2d_array &a, const ae_int_t n, const bool isupper, const bool isunit);
3533 
3534 /*************************************************************************
3535 Inversion of a matrix given by its LU decomposition.
3536 
3537 COMMERCIAL EDITION OF ALGLIB:
3538 
3539  ! Commercial version of ALGLIB includes two important improvements of
3540  ! this function, which can be used from C++ and C#:
3541  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
3542  ! * multicore support
3543  !
3544  ! Intel MKL gives approximately constant (with respect to number of
3545  ! worker threads) acceleration factor which depends on CPU being used,
3546  ! problem size and "baseline" ALGLIB edition which is used for
3547  ! comparison.
3548  !
3549  ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be:
3550  ! * about 2-3x faster than ALGLIB for C++ without MKL
3551  ! * about 7-10x faster than "pure C#" edition of ALGLIB
 3552  ! Difference in performance will be more striking on newer CPUs with
3553  ! support for newer SIMD instructions. Generally, MKL accelerates any
3554  ! problem whose size is at least 128, with best efficiency achieved for
3555  ! N's larger than 512.
3556  !
3557  ! Commercial edition of ALGLIB also supports multithreaded acceleration
3558  ! of this function. We should note that matrix inversion is harder to
3559  ! parallelize than, say, matrix-matrix product - this algorithm has
3560  ! many internal synchronization points which can not be avoided. However
3561  ! parallelism starts to be profitable starting from N=1024, achieving
3562  ! near-linear speedup for N=4096 or higher.
3563  !
3564  ! In order to use multicore features you have to:
3565  ! * use commercial version of ALGLIB
3566  ! * call this function with "smp_" prefix, which indicates that
3567  ! multicore code will be used (for multicore support)
3568  !
3569  ! We recommend you to read 'Working with commercial version' section of
3570  ! ALGLIB Reference Manual in order to find out how to use performance-
3571  ! related features provided by commercial edition of ALGLIB.
3572 
3573 INPUT PARAMETERS:
3574  A - LU decomposition of the matrix
3575  (output of RMatrixLU subroutine).
3576  Pivots - table of permutations
3577  (the output of RMatrixLU subroutine).
3578  N - size of matrix A (optional) :
3579  * if given, only principal NxN submatrix is processed and
3580  overwritten. other elements are unchanged.
3581  * if not given, size is automatically determined from
3582  matrix size (A must be square matrix)
3583 
3584 OUTPUT PARAMETERS:
3585  Info - return code:
3586  * -3 A is singular, or VERY close to singular.
3587  it is filled by zeros in such cases.
3588  * 1 task is solved (but matrix A may be ill-conditioned,
3589  check R1/RInf parameters for condition numbers).
3590  Rep - solver report, see below for more info
3591  A - inverse of matrix A.
3592  Array whose indexes range within [0..N-1, 0..N-1].
3593 
3594 SOLVER REPORT
3595 
3596 Subroutine sets following fields of the Rep structure:
3597 * R1 reciprocal of condition number: 1/cond(A), 1-norm.
3598 * RInf reciprocal of condition number: 1/cond(A), inf-norm.
3599 
3600  -- ALGLIB routine --
3601  05.02.2010
3602  Bochkanov Sergey
3603 *************************************************************************/
3604 void rmatrixluinverse(real_2d_array &a, const integer_1d_array &pivots, const ae_int_t n, ae_int_t &info, matinvreport &rep);
3608 
3609 
3610 /*************************************************************************
3611 Inversion of a general matrix.
3612 
3613 COMMERCIAL EDITION OF ALGLIB:
3614 
3615  ! Commercial version of ALGLIB includes two important improvements of
3616  ! this function, which can be used from C++ and C#:
3617  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
3618  ! * multicore support
3619  !
3620  ! Intel MKL gives approximately constant (with respect to number of
3621  ! worker threads) acceleration factor which depends on CPU being used,
3622  ! problem size and "baseline" ALGLIB edition which is used for
3623  ! comparison.
3624  !
3625  ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be:
3626  ! * about 2-3x faster than ALGLIB for C++ without MKL
3627  ! * about 7-10x faster than "pure C#" edition of ALGLIB
 3628  ! Difference in performance will be more striking on newer CPUs with
3629  ! support for newer SIMD instructions. Generally, MKL accelerates any
3630  ! problem whose size is at least 128, with best efficiency achieved for
3631  ! N's larger than 512.
3632  !
3633  ! Commercial edition of ALGLIB also supports multithreaded acceleration
3634  ! of this function. We should note that matrix inversion is harder to
3635  ! parallelize than, say, matrix-matrix product - this algorithm has
3636  ! many internal synchronization points which can not be avoided. However
3637  ! parallelism starts to be profitable starting from N=1024, achieving
3638  ! near-linear speedup for N=4096 or higher.
3639  !
3640  ! In order to use multicore features you have to:
3641  ! * use commercial version of ALGLIB
3642  ! * call this function with "smp_" prefix, which indicates that
3643  ! multicore code will be used (for multicore support)
3644  !
3645  ! We recommend you to read 'Working with commercial version' section of
3646  ! ALGLIB Reference Manual in order to find out how to use performance-
3647  ! related features provided by commercial edition of ALGLIB.
3648 
3649 Input parameters:
3650  A - matrix.
3651  N - size of matrix A (optional) :
3652  * if given, only principal NxN submatrix is processed and
3653  overwritten. other elements are unchanged.
3654  * if not given, size is automatically determined from
3655  matrix size (A must be square matrix)
3656 
3657 Output parameters:
3658  Info - return code, same as in RMatrixLUInverse
3659  Rep - solver report, same as in RMatrixLUInverse
3660  A - inverse of matrix A, same as in RMatrixLUInverse
3661 
3662 Result:
3663  True, if the matrix is not singular.
3664  False, if the matrix is singular.
3665 
3666  -- ALGLIB --
3667  Copyright 2005-2010 by Bochkanov Sergey
3668 *************************************************************************/
3673 
3674 
3675 /*************************************************************************
3676 Inversion of a matrix given by its LU decomposition.
3677 
3678 COMMERCIAL EDITION OF ALGLIB:
3679 
3680  ! Commercial version of ALGLIB includes two important improvements of
3681  ! this function, which can be used from C++ and C#:
3682  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
3683  ! * multicore support
3684  !
3685  ! Intel MKL gives approximately constant (with respect to number of
3686  ! worker threads) acceleration factor which depends on CPU being used,
3687  ! problem size and "baseline" ALGLIB edition which is used for
3688  ! comparison.
3689  !
3690  ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be:
3691  ! * about 2-3x faster than ALGLIB for C++ without MKL
3692  ! * about 7-10x faster than "pure C#" edition of ALGLIB
 3693  ! Difference in performance will be more striking on newer CPUs with
3694  ! support for newer SIMD instructions. Generally, MKL accelerates any
3695  ! problem whose size is at least 128, with best efficiency achieved for
3696  ! N's larger than 512.
3697  !
3698  ! Commercial edition of ALGLIB also supports multithreaded acceleration
3699  ! of this function. We should note that matrix inversion is harder to
3700  ! parallelize than, say, matrix-matrix product - this algorithm has
3701  ! many internal synchronization points which can not be avoided. However
3702  ! parallelism starts to be profitable starting from N=1024, achieving
3703  ! near-linear speedup for N=4096 or higher.
3704  !
3705  ! In order to use multicore features you have to:
3706  ! * use commercial version of ALGLIB
3707  ! * call this function with "smp_" prefix, which indicates that
3708  ! multicore code will be used (for multicore support)
3709  !
3710  ! We recommend you to read 'Working with commercial version' section of
3711  ! ALGLIB Reference Manual in order to find out how to use performance-
3712  ! related features provided by commercial edition of ALGLIB.
3713 
3714 INPUT PARAMETERS:
3715  A - LU decomposition of the matrix
3716  (output of CMatrixLU subroutine).
3717  Pivots - table of permutations
3718  (the output of CMatrixLU subroutine).
3719  N - size of matrix A (optional) :
3720  * if given, only principal NxN submatrix is processed and
3721  overwritten. other elements are unchanged.
3722  * if not given, size is automatically determined from
3723  matrix size (A must be square matrix)
3724 
3725 OUTPUT PARAMETERS:
3726  Info - return code, same as in RMatrixLUInverse
3727  Rep - solver report, same as in RMatrixLUInverse
3728  A - inverse of matrix A, same as in RMatrixLUInverse
3729 
3730  -- ALGLIB routine --
3731  05.02.2010
3732  Bochkanov Sergey
3733 *************************************************************************/
3734 void cmatrixluinverse(complex_2d_array &a, const integer_1d_array &pivots, const ae_int_t n, ae_int_t &info, matinvreport &rep);
3738 
3739 
3740 /*************************************************************************
3741 Inversion of a general matrix.
3742 
3743 COMMERCIAL EDITION OF ALGLIB:
3744 
3745  ! Commercial version of ALGLIB includes two important improvements of
3746  ! this function, which can be used from C++ and C#:
3747  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
3748  ! * multicore support
3749  !
3750  ! Intel MKL gives approximately constant (with respect to number of
3751  ! worker threads) acceleration factor which depends on CPU being used,
3752  ! problem size and "baseline" ALGLIB edition which is used for
3753  ! comparison.
3754  !
3755  ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be:
3756  ! * about 2-3x faster than ALGLIB for C++ without MKL
3757  ! * about 7-10x faster than "pure C#" edition of ALGLIB
 3758  ! Difference in performance will be more striking on newer CPUs with
3759  ! support for newer SIMD instructions. Generally, MKL accelerates any
3760  ! problem whose size is at least 128, with best efficiency achieved for
3761  ! N's larger than 512.
3762  !
3763  ! Commercial edition of ALGLIB also supports multithreaded acceleration
3764  ! of this function. We should note that matrix inversion is harder to
3765  ! parallelize than, say, matrix-matrix product - this algorithm has
3766  ! many internal synchronization points which can not be avoided. However
3767  ! parallelism starts to be profitable starting from N=1024, achieving
3768  ! near-linear speedup for N=4096 or higher.
3769  !
3770  ! In order to use multicore features you have to:
3771  ! * use commercial version of ALGLIB
3772  ! * call this function with "smp_" prefix, which indicates that
3773  ! multicore code will be used (for multicore support)
3774  !
3775  ! We recommend you to read 'Working with commercial version' section of
3776  ! ALGLIB Reference Manual in order to find out how to use performance-
3777  ! related features provided by commercial edition of ALGLIB.
3778 
3779 Input parameters:
3780  A - matrix
3781  N - size of matrix A (optional) :
3782  * if given, only principal NxN submatrix is processed and
3783  overwritten. other elements are unchanged.
3784  * if not given, size is automatically determined from
3785  matrix size (A must be square matrix)
3786 
3787 Output parameters:
3788  Info - return code, same as in RMatrixLUInverse
3789  Rep - solver report, same as in RMatrixLUInverse
3790  A - inverse of matrix A, same as in RMatrixLUInverse
3791 
3792  -- ALGLIB --
3793  Copyright 2005 by Bochkanov Sergey
3794 *************************************************************************/
3799 
3800 
3801 /*************************************************************************
3802 Inversion of a symmetric positive definite matrix which is given
3803 by Cholesky decomposition.
3804 
3805 COMMERCIAL EDITION OF ALGLIB:
3806 
3807  ! Commercial version of ALGLIB includes two important improvements of
3808  ! this function, which can be used from C++ and C#:
3809  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
3810  ! * multicore support
3811  !
3812  ! Intel MKL gives approximately constant (with respect to number of
3813  ! worker threads) acceleration factor which depends on CPU being used,
3814  ! problem size and "baseline" ALGLIB edition which is used for
3815  ! comparison.
3816  !
3817  ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be:
3818  ! * about 2-3x faster than ALGLIB for C++ without MKL
3819  ! * about 7-10x faster than "pure C#" edition of ALGLIB
 3820  ! Difference in performance will be more striking on newer CPUs with
3821  ! support for newer SIMD instructions. Generally, MKL accelerates any
3822  ! problem whose size is at least 128, with best efficiency achieved for
3823  ! N's larger than 512.
3824  !
3825  ! Commercial edition of ALGLIB also supports multithreaded acceleration
3826  ! of this function. However, Cholesky inversion is a "difficult"
3827  ! algorithm - it has lots of internal synchronization points which
3828  ! prevents efficient parallelization of algorithm. Only very large
3829  ! problems (N=thousands) can be efficiently parallelized.
3830  !
3831  ! We recommend you to read 'Working with commercial version' section of
3832  ! ALGLIB Reference Manual in order to find out how to use performance-
3833  ! related features provided by commercial edition of ALGLIB.
3834 
3835 Input parameters:
3836  A - Cholesky decomposition of the matrix to be inverted:
3837  A=U'*U or A = L*L'.
3838  Output of SPDMatrixCholesky subroutine.
3839  N - size of matrix A (optional) :
3840  * if given, only principal NxN submatrix is processed and
3841  overwritten. other elements are unchanged.
3842  * if not given, size is automatically determined from
3843  matrix size (A must be square matrix)
3844  IsUpper - storage type (optional):
3845  * if True, symmetric matrix A is given by its upper
3846  triangle, and the lower triangle isn't used/changed by
3847  function
3848  * if False, symmetric matrix A is given by its lower
3849  triangle, and the upper triangle isn't used/changed by
3850  function
3851  * if not given, lower half is used.
3852 
3853 Output parameters:
3854  Info - return code, same as in RMatrixLUInverse
3855  Rep - solver report, same as in RMatrixLUInverse
3856  A - inverse of matrix A, same as in RMatrixLUInverse
3857 
3858  -- ALGLIB routine --
3859  10.02.2010
3860  Bochkanov Sergey
3861 *************************************************************************/
3862 void spdmatrixcholeskyinverse(real_2d_array &a, const ae_int_t n, const bool isupper, ae_int_t &info, matinvreport &rep);
3863 void smp_spdmatrixcholeskyinverse(real_2d_array &a, const ae_int_t n, const bool isupper, ae_int_t &info, matinvreport &rep);
3866 
3867 
3868 /*************************************************************************
3869 Inversion of a symmetric positive definite matrix.
3870 
3871 Given an upper or lower triangle of a symmetric positive definite matrix,
3872 the algorithm generates matrix A^-1 and saves the upper or lower triangle
3873 depending on the input.
3874 
3875 COMMERCIAL EDITION OF ALGLIB:
3876 
3877  ! Commercial version of ALGLIB includes two important improvements of
3878  ! this function, which can be used from C++ and C#:
3879  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
3880  ! * multicore support
3881  !
3882  ! Intel MKL gives approximately constant (with respect to number of
3883  ! worker threads) acceleration factor which depends on CPU being used,
3884  ! problem size and "baseline" ALGLIB edition which is used for
3885  ! comparison.
3886  !
3887  ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be:
3888  ! * about 2-3x faster than ALGLIB for C++ without MKL
3889  ! * about 7-10x faster than "pure C#" edition of ALGLIB
 3890  ! Difference in performance will be more striking on newer CPUs with
3891  ! support for newer SIMD instructions. Generally, MKL accelerates any
3892  ! problem whose size is at least 128, with best efficiency achieved for
3893  ! N's larger than 512.
3894  !
3895  ! Commercial edition of ALGLIB also supports multithreaded acceleration
3896  ! of this function. However, Cholesky inversion is a "difficult"
3897  ! algorithm - it has lots of internal synchronization points which
3898  ! prevents efficient parallelization of algorithm. Only very large
3899  ! problems (N=thousands) can be efficiently parallelized.
3900  !
3901  ! We recommend you to read 'Working with commercial version' section of
3902  ! ALGLIB Reference Manual in order to find out how to use performance-
3903  ! related features provided by commercial edition of ALGLIB.
3904 
3905 Input parameters:
3906  A - matrix to be inverted (upper or lower triangle).
3907  Array with elements [0..N-1,0..N-1].
3908  N - size of matrix A (optional) :
3909  * if given, only principal NxN submatrix is processed and
3910  overwritten. other elements are unchanged.
3911  * if not given, size is automatically determined from
3912  matrix size (A must be square matrix)
3913  IsUpper - storage type (optional):
3914  * if True, symmetric matrix A is given by its upper
3915  triangle, and the lower triangle isn't used/changed by
3916  function
3917  * if False, symmetric matrix A is given by its lower
3918  triangle, and the upper triangle isn't used/changed by
3919  function
3920  * if not given, both lower and upper triangles must be
3921  filled.
3922 
3923 Output parameters:
3924  Info - return code, same as in RMatrixLUInverse
3925  Rep - solver report, same as in RMatrixLUInverse
3926  A - inverse of matrix A, same as in RMatrixLUInverse
3927 
3928  -- ALGLIB routine --
3929  10.02.2010
3930  Bochkanov Sergey
3931 *************************************************************************/
3932 void spdmatrixinverse(real_2d_array &a, const ae_int_t n, const bool isupper, ae_int_t &info, matinvreport &rep);
3933 void smp_spdmatrixinverse(real_2d_array &a, const ae_int_t n, const bool isupper, ae_int_t &info, matinvreport &rep);
3936 
3937 
3938 /*************************************************************************
3939 Inversion of a Hermitian positive definite matrix which is given
3940 by Cholesky decomposition.
3941 
3942 COMMERCIAL EDITION OF ALGLIB:
3943 
3944  ! Commercial version of ALGLIB includes two important improvements of
3945  ! this function, which can be used from C++ and C#:
3946  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
3947  ! * multicore support
3948  !
3949  ! Intel MKL gives approximately constant (with respect to number of
3950  ! worker threads) acceleration factor which depends on CPU being used,
3951  ! problem size and "baseline" ALGLIB edition which is used for
3952  ! comparison.
3953  !
3954  ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be:
3955  ! * about 2-3x faster than ALGLIB for C++ without MKL
3956  ! * about 7-10x faster than "pure C#" edition of ALGLIB
3957  ! Difference in performance will be more striking on newer CPU's with
3958  ! support for newer SIMD instructions. Generally, MKL accelerates any
3959  ! problem whose size is at least 128, with best efficiency achieved for
3960  ! N's larger than 512.
3961  !
3962  ! Commercial edition of ALGLIB also supports multithreaded acceleration
3963  ! of this function. However, Cholesky inversion is a "difficult"
3964  ! algorithm - it has lots of internal synchronization points which
3965  ! prevents efficient parallelization of algorithm. Only very large
3966  ! problems (N=thousands) can be efficiently parallelized.
3967  !
3968  ! We recommend you to read 'Working with commercial version' section of
3969  ! ALGLIB Reference Manual in order to find out how to use performance-
3970  ! related features provided by commercial edition of ALGLIB.
3971 
3972 Input parameters:
3973  A - Cholesky decomposition of the matrix to be inverted:
3974  A=U'*U or A = L*L'.
3975  Output of HPDMatrixCholesky subroutine.
3976  N - size of matrix A (optional) :
3977  * if given, only principal NxN submatrix is processed and
3978  overwritten. other elements are unchanged.
3979  * if not given, size is automatically determined from
3980  matrix size (A must be square matrix)
3981  IsUpper - storage type (optional):
3982  * if True, symmetric matrix A is given by its upper
3983  triangle, and the lower triangle isn't used/changed by
3984  function
3985  * if False, symmetric matrix A is given by its lower
3986  triangle, and the upper triangle isn't used/changed by
3987  function
3988  * if not given, lower half is used.
3989 
3990 Output parameters:
3991  Info - return code, same as in RMatrixLUInverse
3992  Rep - solver report, same as in RMatrixLUInverse
3993  A - inverse of matrix A, same as in RMatrixLUInverse
3994 
3995  -- ALGLIB routine --
3996  10.02.2010
3997  Bochkanov Sergey
3998 *************************************************************************/
3999 void hpdmatrixcholeskyinverse(complex_2d_array &a, const ae_int_t n, const bool isupper, ae_int_t &info, matinvreport &rep);
4000 void smp_hpdmatrixcholeskyinverse(complex_2d_array &a, const ae_int_t n, const bool isupper, ae_int_t &info, matinvreport &rep);
4003 
4004 
4005 /*************************************************************************
4006 Inversion of a Hermitian positive definite matrix.
4007 
4008 Given an upper or lower triangle of a Hermitian positive definite matrix,
4009 the algorithm generates matrix A^-1 and saves the upper or lower triangle
4010 depending on the input.
4011 
4012 COMMERCIAL EDITION OF ALGLIB:
4013 
4014  ! Commercial version of ALGLIB includes two important improvements of
4015  ! this function, which can be used from C++ and C#:
4016  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
4017  ! * multicore support
4018  !
4019  ! Intel MKL gives approximately constant (with respect to number of
4020  ! worker threads) acceleration factor which depends on CPU being used,
4021  ! problem size and "baseline" ALGLIB edition which is used for
4022  ! comparison.
4023  !
4024  ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be:
4025  ! * about 2-3x faster than ALGLIB for C++ without MKL
4026  ! * about 7-10x faster than "pure C#" edition of ALGLIB
4027  ! Difference in performance will be more striking on newer CPU's with
4028  ! support for newer SIMD instructions. Generally, MKL accelerates any
4029  ! problem whose size is at least 128, with best efficiency achieved for
4030  ! N's larger than 512.
4031  !
4032  ! Commercial edition of ALGLIB also supports multithreaded acceleration
4033  ! of this function. However, Cholesky inversion is a "difficult"
4034  ! algorithm - it has lots of internal synchronization points which
4035  ! prevents efficient parallelization of algorithm. Only very large
4036  ! problems (N=thousands) can be efficiently parallelized.
4037  !
4038  ! We recommend you to read 'Working with commercial version' section of
4039  ! ALGLIB Reference Manual in order to find out how to use performance-
4040  ! related features provided by commercial edition of ALGLIB.
4041 
4042 Input parameters:
4043  A - matrix to be inverted (upper or lower triangle).
4044  Array with elements [0..N-1,0..N-1].
4045  N - size of matrix A (optional) :
4046  * if given, only principal NxN submatrix is processed and
4047  overwritten. other elements are unchanged.
4048  * if not given, size is automatically determined from
4049  matrix size (A must be square matrix)
4050  IsUpper - storage type (optional):
4051  * if True, symmetric matrix A is given by its upper
4052  triangle, and the lower triangle isn't used/changed by
4053  function
4054  * if False, symmetric matrix A is given by its lower
4055  triangle, and the upper triangle isn't used/changed by
4056  function
4057  * if not given, both lower and upper triangles must be
4058  filled.
4059 
4060 Output parameters:
4061  Info - return code, same as in RMatrixLUInverse
4062  Rep - solver report, same as in RMatrixLUInverse
4063  A - inverse of matrix A, same as in RMatrixLUInverse
4064 
4065  -- ALGLIB routine --
4066  10.02.2010
4067  Bochkanov Sergey
4068 *************************************************************************/
4069 void hpdmatrixinverse(complex_2d_array &a, const ae_int_t n, const bool isupper, ae_int_t &info, matinvreport &rep);
4070 void smp_hpdmatrixinverse(complex_2d_array &a, const ae_int_t n, const bool isupper, ae_int_t &info, matinvreport &rep);
4073 
4074 
4075 /*************************************************************************
4076 Triangular matrix inverse (real)
4077 
4078 The subroutine inverts the following types of matrices:
4079  * upper triangular
4080  * upper triangular with unit diagonal
4081  * lower triangular
4082  * lower triangular with unit diagonal
4083 
4084 In case of an upper (lower) triangular matrix, the inverse matrix will
4085 also be upper (lower) triangular, and after the end of the algorithm, the
4086 inverse matrix replaces the source matrix. The elements below (above) the
4087 main diagonal are not changed by the algorithm.
4088 
4089 If the matrix has a unit diagonal, the inverse matrix also has a unit
4090 diagonal, and the diagonal elements are not passed to the algorithm.
4091 
4092 COMMERCIAL EDITION OF ALGLIB:
4093 
4094  ! Commercial version of ALGLIB includes two important improvements of
4095  ! this function, which can be used from C++ and C#:
4096  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
4097  ! * multicore support
4098  !
4099  ! Intel MKL gives approximately constant (with respect to number of
4100  ! worker threads) acceleration factor which depends on CPU being used,
4101  ! problem size and "baseline" ALGLIB edition which is used for
4102  ! comparison.
4103  !
4104  ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be:
4105  ! * about 2-3x faster than ALGLIB for C++ without MKL
4106  ! * about 7-10x faster than "pure C#" edition of ALGLIB
4107  ! Difference in performance will be more striking on newer CPU's with
4108  ! support for newer SIMD instructions. Generally, MKL accelerates any
4109  ! problem whose size is at least 128, with best efficiency achieved for
4110  ! N's larger than 512.
4111  !
4112  ! Commercial edition of ALGLIB also supports multithreaded acceleration
4113  ! of this function. We should note that triangular inverse is harder to
4114  ! parallelize than, say, matrix-matrix product - this algorithm has
4115  ! many internal synchronization points which can not be avoided. However
4116  ! parallelism starts to be profitable starting from N=1024, achieving
4117  ! near-linear speedup for N=4096 or higher.
4118  !
4119  ! In order to use multicore features you have to:
4120  ! * use commercial version of ALGLIB
4121  ! * call this function with "smp_" prefix, which indicates that
4122  ! multicore code will be used (for multicore support)
4123  !
4124  ! We recommend you to read 'Working with commercial version' section of
4125  ! ALGLIB Reference Manual in order to find out how to use performance-
4126  ! related features provided by commercial edition of ALGLIB.
4127 
4128 Input parameters:
4129  A - matrix, array[0..N-1, 0..N-1].
4130  N - size of matrix A (optional) :
4131  * if given, only principal NxN submatrix is processed and
4132  overwritten. other elements are unchanged.
4133  * if not given, size is automatically determined from
4134  matrix size (A must be square matrix)
4135  IsUpper - True, if the matrix is upper triangular.
4136  IsUnit - diagonal type (optional):
4137  * if True, matrix has unit diagonal (a[i,i] are NOT used)
4138  * if False, matrix diagonal is arbitrary
4139  * if not given, False is assumed
4140 
4141 Output parameters:
4142  Info - same as for RMatrixLUInverse
4143  Rep - same as for RMatrixLUInverse
4144  A - same as for RMatrixLUInverse.
4145 
4146  -- ALGLIB --
4147  Copyright 05.02.2010 by Bochkanov Sergey
4148 *************************************************************************/
4149 void rmatrixtrinverse(real_2d_array &a, const ae_int_t n, const bool isupper, const bool isunit, ae_int_t &info, matinvreport &rep);
4150 void smp_rmatrixtrinverse(real_2d_array &a, const ae_int_t n, const bool isupper, const bool isunit, ae_int_t &info, matinvreport &rep);
4151 void rmatrixtrinverse(real_2d_array &a, const bool isupper, ae_int_t &info, matinvreport &rep);
4152 void smp_rmatrixtrinverse(real_2d_array &a, const bool isupper, ae_int_t &info, matinvreport &rep);
4153 
4154 
4155 /*************************************************************************
4156 Triangular matrix inverse (complex)
4157 
4158 The subroutine inverts the following types of matrices:
4159  * upper triangular
4160  * upper triangular with unit diagonal
4161  * lower triangular
4162  * lower triangular with unit diagonal
4163 
4164 In case of an upper (lower) triangular matrix, the inverse matrix will
4165 also be upper (lower) triangular, and after the end of the algorithm, the
4166 inverse matrix replaces the source matrix. The elements below (above) the
4167 main diagonal are not changed by the algorithm.
4168 
4169 If the matrix has a unit diagonal, the inverse matrix also has a unit
4170 diagonal, and the diagonal elements are not passed to the algorithm.
4171 
4172 COMMERCIAL EDITION OF ALGLIB:
4173 
4174  ! Commercial version of ALGLIB includes two important improvements of
4175  ! this function, which can be used from C++ and C#:
4176  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
4177  ! * multicore support
4178  !
4179  ! Intel MKL gives approximately constant (with respect to number of
4180  ! worker threads) acceleration factor which depends on CPU being used,
4181  ! problem size and "baseline" ALGLIB edition which is used for
4182  ! comparison.
4183  !
4184  ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be:
4185  ! * about 2-3x faster than ALGLIB for C++ without MKL
4186  ! * about 7-10x faster than "pure C#" edition of ALGLIB
4187  ! Difference in performance will be more striking on newer CPU's with
4188  ! support for newer SIMD instructions. Generally, MKL accelerates any
4189  ! problem whose size is at least 128, with best efficiency achieved for
4190  ! N's larger than 512.
4191  !
4192  ! Commercial edition of ALGLIB also supports multithreaded acceleration
4193  ! of this function. We should note that triangular inverse is harder to
4194  ! parallelize than, say, matrix-matrix product - this algorithm has
4195  ! many internal synchronization points which can not be avoided. However
4196  ! parallelism starts to be profitable starting from N=1024, achieving
4197  ! near-linear speedup for N=4096 or higher.
4198  !
4199  ! In order to use multicore features you have to:
4200  ! * use commercial version of ALGLIB
4201  ! * call this function with "smp_" prefix, which indicates that
4202  ! multicore code will be used (for multicore support)
4203  !
4204  ! We recommend you to read 'Working with commercial version' section of
4205  ! ALGLIB Reference Manual in order to find out how to use performance-
4206  ! related features provided by commercial edition of ALGLIB.
4207 
4208 Input parameters:
4209  A - matrix, array[0..N-1, 0..N-1].
4210  N - size of matrix A (optional) :
4211  * if given, only principal NxN submatrix is processed and
4212  overwritten. other elements are unchanged.
4213  * if not given, size is automatically determined from
4214  matrix size (A must be square matrix)
4215  IsUpper - True, if the matrix is upper triangular.
4216  IsUnit - diagonal type (optional):
4217  * if True, matrix has unit diagonal (a[i,i] are NOT used)
4218  * if False, matrix diagonal is arbitrary
4219  * if not given, False is assumed
4220 
4221 Output parameters:
4222  Info - same as for RMatrixLUInverse
4223  Rep - same as for RMatrixLUInverse
4224  A - same as for RMatrixLUInverse.
4225 
4226  -- ALGLIB --
4227  Copyright 05.02.2010 by Bochkanov Sergey
4228 *************************************************************************/
4229 void cmatrixtrinverse(complex_2d_array &a, const ae_int_t n, const bool isupper, const bool isunit, ae_int_t &info, matinvreport &rep);
4230 void smp_cmatrixtrinverse(complex_2d_array &a, const ae_int_t n, const bool isupper, const bool isunit, ae_int_t &info, matinvreport &rep);
4231 void cmatrixtrinverse(complex_2d_array &a, const bool isupper, ae_int_t &info, matinvreport &rep);
4232 void smp_cmatrixtrinverse(complex_2d_array &a, const bool isupper, ae_int_t &info, matinvreport &rep);
4233 
4234 /*************************************************************************
4235 QR decomposition of a rectangular matrix of size MxN
4236 
4237 COMMERCIAL EDITION OF ALGLIB:
4238 
4239  ! Commercial version of ALGLIB includes two important improvements of
4240  ! this function, which can be used from C++ and C#:
4241  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
4242  ! * multicore support
4243  !
4244  ! Intel MKL gives approximately constant (with respect to number of
4245  ! worker threads) acceleration factor which depends on CPU being used,
4246  ! problem size and "baseline" ALGLIB edition which is used for
4247  ! comparison.
4248  !
4249  ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be:
4250  ! * about 2-3x faster than ALGLIB for C++ without MKL
4251  ! * about 7-10x faster than "pure C#" edition of ALGLIB
4252  ! Difference in performance will be more striking on newer CPU's with
4253  ! support for newer SIMD instructions. Generally, MKL accelerates any
4254  ! problem whose size is at least 128, with best efficiency achieved for
4255  ! N's larger than 512.
4256  !
4257  ! Commercial edition of ALGLIB also supports multithreaded acceleration
 4258  ! of this function. We should note that QR decomposition is harder to
4259  ! parallelize than, say, matrix-matrix product - this algorithm has
4260  ! many internal synchronization points which can not be avoided. However
4261  ! parallelism starts to be profitable starting from N=512, achieving
4262  ! near-linear speedup for N=4096 or higher.
4263  !
4264  ! In order to use multicore features you have to:
4265  ! * use commercial version of ALGLIB
4266  ! * call this function with "smp_" prefix, which indicates that
4267  ! multicore code will be used (for multicore support)
4268  !
4269  ! We recommend you to read 'Working with commercial version' section of
4270  ! ALGLIB Reference Manual in order to find out how to use performance-
4271  ! related features provided by commercial edition of ALGLIB.
4272 
4273 Input parameters:
4274  A - matrix A whose indexes range within [0..M-1, 0..N-1].
4275  M - number of rows in matrix A.
4276  N - number of columns in matrix A.
4277 
4278 Output parameters:
4279  A - matrices Q and R in compact form (see below).
4280  Tau - array of scalar factors which are used to form
4281  matrix Q. Array whose index ranges within [0.. Min(M-1,N-1)].
4282 
4283 Matrix A is represented as A = QR, where Q is an orthogonal matrix of size
4284 MxM, R - upper triangular (or upper trapezoid) matrix of size M x N.
4285 
4286 The elements of matrix R are located on and above the main diagonal of
4287 matrix A. The elements which are located in Tau array and below the main
4288 diagonal of matrix A are used to form matrix Q as follows:
4289 
4290 Matrix Q is represented as a product of elementary reflections
4291 
 4292 Q = H(0)*H(1)*...*H(k-1),
4293 
4294 where k = min(m,n), and each H(i) is in the form
4295 
4296 H(i) = 1 - tau * v * (v^T)
4297 
4298 where tau is a scalar stored in Tau[I]; v - real vector,
4299 so that v(0:i-1) = 0, v(i) = 1, v(i+1:m-1) stored in A(i+1:m-1,i).
4300 
4301  -- ALGLIB routine --
4302  17.02.2010
4303  Bochkanov Sergey
4304 *************************************************************************/
4305 void rmatrixqr(real_2d_array &a, const ae_int_t m, const ae_int_t n, real_1d_array &tau);
4306 void smp_rmatrixqr(real_2d_array &a, const ae_int_t m, const ae_int_t n, real_1d_array &tau);
4307 
4308 
4309 /*************************************************************************
4310 LQ decomposition of a rectangular matrix of size MxN
4311 
4312 COMMERCIAL EDITION OF ALGLIB:
4313 
4314  ! Commercial version of ALGLIB includes two important improvements of
4315  ! this function, which can be used from C++ and C#:
4316  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
4317  ! * multicore support
4318  !
4319  ! Intel MKL gives approximately constant (with respect to number of
4320  ! worker threads) acceleration factor which depends on CPU being used,
4321  ! problem size and "baseline" ALGLIB edition which is used for
4322  ! comparison.
4323  !
4324  ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be:
4325  ! * about 2-3x faster than ALGLIB for C++ without MKL
4326  ! * about 7-10x faster than "pure C#" edition of ALGLIB
4327  ! Difference in performance will be more striking on newer CPU's with
4328  ! support for newer SIMD instructions. Generally, MKL accelerates any
4329  ! problem whose size is at least 128, with best efficiency achieved for
4330  ! N's larger than 512.
4331  !
4332  ! Commercial edition of ALGLIB also supports multithreaded acceleration
 4333  ! of this function. We should note that LQ decomposition is harder to
4334  ! parallelize than, say, matrix-matrix product - this algorithm has
4335  ! many internal synchronization points which can not be avoided. However
4336  ! parallelism starts to be profitable starting from N=512, achieving
4337  ! near-linear speedup for N=4096 or higher.
4338  !
4339  ! In order to use multicore features you have to:
4340  ! * use commercial version of ALGLIB
4341  ! * call this function with "smp_" prefix, which indicates that
4342  ! multicore code will be used (for multicore support)
4343  !
4344  ! We recommend you to read 'Working with commercial version' section of
4345  ! ALGLIB Reference Manual in order to find out how to use performance-
4346  ! related features provided by commercial edition of ALGLIB.
4347 
4348 Input parameters:
4349  A - matrix A whose indexes range within [0..M-1, 0..N-1].
4350  M - number of rows in matrix A.
4351  N - number of columns in matrix A.
4352 
4353 Output parameters:
4354  A - matrices L and Q in compact form (see below)
4355  Tau - array of scalar factors which are used to form
4356  matrix Q. Array whose index ranges within [0..Min(M,N)-1].
4357 
4358 Matrix A is represented as A = LQ, where Q is an orthogonal matrix of size
4359 MxM, L - lower triangular (or lower trapezoid) matrix of size M x N.
4360 
4361 The elements of matrix L are located on and below the main diagonal of
4362 matrix A. The elements which are located in Tau array and above the main
4363 diagonal of matrix A are used to form matrix Q as follows:
4364 
4365 Matrix Q is represented as a product of elementary reflections
4366 
4367 Q = H(k-1)*H(k-2)*...*H(1)*H(0),
4368 
4369 where k = min(m,n), and each H(i) is of the form
4370 
4371 H(i) = 1 - tau * v * (v^T)
4372 
4373 where tau is a scalar stored in Tau[I]; v - real vector, so that v(0:i-1)=0,
4374 v(i) = 1, v(i+1:n-1) stored in A(i,i+1:n-1).
4375 
4376  -- ALGLIB routine --
4377  17.02.2010
4378  Bochkanov Sergey
4379 *************************************************************************/
4380 void rmatrixlq(real_2d_array &a, const ae_int_t m, const ae_int_t n, real_1d_array &tau);
4381 void smp_rmatrixlq(real_2d_array &a, const ae_int_t m, const ae_int_t n, real_1d_array &tau);
4382 
4383 
4384 /*************************************************************************
4385 QR decomposition of a rectangular complex matrix of size MxN
4386 
4387 COMMERCIAL EDITION OF ALGLIB:
4388 
4389  ! Commercial version of ALGLIB includes two important improvements of
4390  ! this function, which can be used from C++ and C#:
4391  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
4392  ! * multicore support
4393  !
4394  ! Intel MKL gives approximately constant (with respect to number of
4395  ! worker threads) acceleration factor which depends on CPU being used,
4396  ! problem size and "baseline" ALGLIB edition which is used for
4397  ! comparison.
4398  !
4399  ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be:
4400  ! * about 2-3x faster than ALGLIB for C++ without MKL
4401  ! * about 7-10x faster than "pure C#" edition of ALGLIB
4402  ! Difference in performance will be more striking on newer CPU's with
4403  ! support for newer SIMD instructions. Generally, MKL accelerates any
4404  ! problem whose size is at least 128, with best efficiency achieved for
4405  ! N's larger than 512.
4406  !
4407  ! Commercial edition of ALGLIB also supports multithreaded acceleration
 4408  ! of this function. We should note that QR decomposition is harder to
4409  ! parallelize than, say, matrix-matrix product - this algorithm has
4410  ! many internal synchronization points which can not be avoided. However
4411  ! parallelism starts to be profitable starting from N=512, achieving
4412  ! near-linear speedup for N=4096 or higher.
4413  !
4414  ! In order to use multicore features you have to:
4415  ! * use commercial version of ALGLIB
4416  ! * call this function with "smp_" prefix, which indicates that
4417  ! multicore code will be used (for multicore support)
4418  !
4419  ! We recommend you to read 'Working with commercial version' section of
4420  ! ALGLIB Reference Manual in order to find out how to use performance-
4421  ! related features provided by commercial edition of ALGLIB.
4422 
4423 Input parameters:
4424  A - matrix A whose indexes range within [0..M-1, 0..N-1]
4425  M - number of rows in matrix A.
4426  N - number of columns in matrix A.
4427 
4428 Output parameters:
4429  A - matrices Q and R in compact form
4430  Tau - array of scalar factors which are used to form matrix Q. Array
4431  whose indexes range within [0.. Min(M,N)-1]
4432 
4433 Matrix A is represented as A = QR, where Q is an orthogonal matrix of size
4434 MxM, R - upper triangular (or upper trapezoid) matrix of size MxN.
4435 
4436  -- LAPACK routine (version 3.0) --
4437  Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
4438  Courant Institute, Argonne National Lab, and Rice University
4439  September 30, 1994
4440 *************************************************************************/
4443 
4444 
4445 /*************************************************************************
4446 LQ decomposition of a rectangular complex matrix of size MxN
4447 
4448 COMMERCIAL EDITION OF ALGLIB:
4449 
4450  ! Commercial version of ALGLIB includes two important improvements of
4451  ! this function, which can be used from C++ and C#:
4452  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
4453  ! * multicore support
4454  !
4455  ! Intel MKL gives approximately constant (with respect to number of
4456  ! worker threads) acceleration factor which depends on CPU being used,
4457  ! problem size and "baseline" ALGLIB edition which is used for
4458  ! comparison.
4459  !
4460  ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be:
4461  ! * about 2-3x faster than ALGLIB for C++ without MKL
4462  ! * about 7-10x faster than "pure C#" edition of ALGLIB
4463  ! Difference in performance will be more striking on newer CPU's with
4464  ! support for newer SIMD instructions. Generally, MKL accelerates any
4465  ! problem whose size is at least 128, with best efficiency achieved for
4466  ! N's larger than 512.
4467  !
4468  ! Commercial edition of ALGLIB also supports multithreaded acceleration
 4469  ! of this function. We should note that LQ decomposition is harder to
4470  ! parallelize than, say, matrix-matrix product - this algorithm has
4471  ! many internal synchronization points which can not be avoided. However
4472  ! parallelism starts to be profitable starting from N=512, achieving
4473  ! near-linear speedup for N=4096 or higher.
4474  !
4475  ! In order to use multicore features you have to:
4476  ! * use commercial version of ALGLIB
4477  ! * call this function with "smp_" prefix, which indicates that
4478  ! multicore code will be used (for multicore support)
4479  !
4480  ! We recommend you to read 'Working with commercial version' section of
4481  ! ALGLIB Reference Manual in order to find out how to use performance-
4482  ! related features provided by commercial edition of ALGLIB.
4483 
4484 Input parameters:
4485  A - matrix A whose indexes range within [0..M-1, 0..N-1]
4486  M - number of rows in matrix A.
4487  N - number of columns in matrix A.
4488 
4489 Output parameters:
4490  A - matrices Q and L in compact form
4491  Tau - array of scalar factors which are used to form matrix Q. Array
4492  whose indexes range within [0.. Min(M,N)-1]
4493 
4494 Matrix A is represented as A = LQ, where Q is an orthogonal matrix of size
4495 MxM, L - lower triangular (or lower trapezoid) matrix of size MxN.
4496 
4497  -- LAPACK routine (version 3.0) --
4498  Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
4499  Courant Institute, Argonne National Lab, and Rice University
4500  September 30, 1994
4501 *************************************************************************/
4504 
4505 
4506 /*************************************************************************
4507 Partial unpacking of matrix Q from the QR decomposition of a matrix A
4508 
4509 COMMERCIAL EDITION OF ALGLIB:
4510 
4511  ! Commercial version of ALGLIB includes two important improvements of
4512  ! this function, which can be used from C++ and C#:
4513  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
4514  ! * multicore support
4515  !
4516  ! Intel MKL gives approximately constant (with respect to number of
4517  ! worker threads) acceleration factor which depends on CPU being used,
4518  ! problem size and "baseline" ALGLIB edition which is used for
4519  ! comparison.
4520  !
4521  ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be:
4522  ! * about 2-3x faster than ALGLIB for C++ without MKL
4523  ! * about 7-10x faster than "pure C#" edition of ALGLIB
4524  ! Difference in performance will be more striking on newer CPU's with
4525  ! support for newer SIMD instructions. Generally, MKL accelerates any
4526  ! problem whose size is at least 128, with best efficiency achieved for
4527  ! N's larger than 512.
4528  !
4529  ! Commercial edition of ALGLIB also supports multithreaded acceleration
 4530  ! of this function. We should note that QR decomposition is harder to
4531  ! parallelize than, say, matrix-matrix product - this algorithm has
4532  ! many internal synchronization points which can not be avoided. However
4533  ! parallelism starts to be profitable starting from N=512, achieving
4534  ! near-linear speedup for N=4096 or higher.
4535  !
4536  ! In order to use multicore features you have to:
4537  ! * use commercial version of ALGLIB
4538  ! * call this function with "smp_" prefix, which indicates that
4539  ! multicore code will be used (for multicore support)
4540  !
4541  ! We recommend you to read 'Working with commercial version' section of
4542  ! ALGLIB Reference Manual in order to find out how to use performance-
4543  ! related features provided by commercial edition of ALGLIB.
4544 
4545 Input parameters:
4546  A - matrices Q and R in compact form.
4547  Output of RMatrixQR subroutine.
4548  M - number of rows in given matrix A. M>=0.
4549  N - number of columns in given matrix A. N>=0.
4550  Tau - scalar factors which are used to form Q.
4551  Output of the RMatrixQR subroutine.
4552  QColumns - required number of columns of matrix Q. M>=QColumns>=0.
4553 
4554 Output parameters:
4555  Q - first QColumns columns of matrix Q.
4556  Array whose indexes range within [0..M-1, 0..QColumns-1].
4557  If QColumns=0, the array remains unchanged.
4558 
4559  -- ALGLIB routine --
4560  17.02.2010
4561  Bochkanov Sergey
4562 *************************************************************************/
4563 void rmatrixqrunpackq(const real_2d_array &a, const ae_int_t m, const ae_int_t n, const real_1d_array &tau, const ae_int_t qcolumns, real_2d_array &q);
4564 void smp_rmatrixqrunpackq(const real_2d_array &a, const ae_int_t m, const ae_int_t n, const real_1d_array &tau, const ae_int_t qcolumns, real_2d_array &q);
4565 
4566 
4567 /*************************************************************************
4568 Unpacking of matrix R from the QR decomposition of a matrix A
4569 
4570 Input parameters:
4571  A - matrices Q and R in compact form.
4572  Output of RMatrixQR subroutine.
4573  M - number of rows in given matrix A. M>=0.
4574  N - number of columns in given matrix A. N>=0.
4575 
4576 Output parameters:
4577  R - matrix R, array[0..M-1, 0..N-1].
4578 
4579  -- ALGLIB routine --
4580  17.02.2010
4581  Bochkanov Sergey
4582 *************************************************************************/
4583 void rmatrixqrunpackr(const real_2d_array &a, const ae_int_t m, const ae_int_t n, real_2d_array &r);
4584 
4585 
4586 /*************************************************************************
4587 Partial unpacking of matrix Q from the LQ decomposition of a matrix A
4588 
4589 COMMERCIAL EDITION OF ALGLIB:
4590 
4591  ! Commercial version of ALGLIB includes two important improvements of
4592  ! this function, which can be used from C++ and C#:
4593  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
4594  ! * multicore support
4595  !
4596  ! Intel MKL gives approximately constant (with respect to number of
4597  ! worker threads) acceleration factor which depends on CPU being used,
4598  ! problem size and "baseline" ALGLIB edition which is used for
4599  ! comparison.
4600  !
4601  ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be:
4602  ! * about 2-3x faster than ALGLIB for C++ without MKL
4603  ! * about 7-10x faster than "pure C#" edition of ALGLIB
4604  ! Difference in performance will be more striking on newer CPU's with
4605  ! support for newer SIMD instructions. Generally, MKL accelerates any
4606  ! problem whose size is at least 128, with best efficiency achieved for
4607  ! N's larger than 512.
4608  !
4609  ! Commercial edition of ALGLIB also supports multithreaded acceleration
4609  ! of this function. We should note that LQ decomposition is harder to
4611  ! parallelize than, say, matrix-matrix product - this algorithm has
4612  ! many internal synchronization points which can not be avoided. However
4613  ! parallelism starts to be profitable starting from N=512, achieving
4614  ! near-linear speedup for N=4096 or higher.
4615  !
4616  ! In order to use multicore features you have to:
4617  ! * use commercial version of ALGLIB
4618  ! * call this function with "smp_" prefix, which indicates that
4619  ! multicore code will be used (for multicore support)
4620  !
4621  ! We recommend you to read 'Working with commercial version' section of
4622  ! ALGLIB Reference Manual in order to find out how to use performance-
4623  ! related features provided by commercial edition of ALGLIB.
4624 
4625 Input parameters:
4626  A - matrices L and Q in compact form.
4627  Output of RMatrixLQ subroutine.
4628  M - number of rows in given matrix A. M>=0.
4629  N - number of columns in given matrix A. N>=0.
4630  Tau - scalar factors which are used to form Q.
4631  Output of the RMatrixLQ subroutine.
4632  QRows - required number of rows in matrix Q. N>=QRows>=0.
4633 
4634 Output parameters:
4635  Q - first QRows rows of matrix Q. Array whose indexes range
4636  within [0..QRows-1, 0..N-1]. If QRows=0, the array remains
4637  unchanged.
4638 
4639  -- ALGLIB routine --
4640  17.02.2010
4641  Bochkanov Sergey
4642 *************************************************************************/
4643 void rmatrixlqunpackq(const real_2d_array &a, const ae_int_t m, const ae_int_t n, const real_1d_array &tau, const ae_int_t qrows, real_2d_array &q);
4644 void smp_rmatrixlqunpackq(const real_2d_array &a, const ae_int_t m, const ae_int_t n, const real_1d_array &tau, const ae_int_t qrows, real_2d_array &q);
4645 
4646 
4647 /*************************************************************************
4648 Unpacking of matrix L from the LQ decomposition of a matrix A
4649 
4650 Input parameters:
4651  A - matrices Q and L in compact form.
4652  Output of RMatrixLQ subroutine.
4653  M - number of rows in given matrix A. M>=0.
4654  N - number of columns in given matrix A. N>=0.
4655 
4656 Output parameters:
4657  L - matrix L, array[0..M-1, 0..N-1].
4658 
4659  -- ALGLIB routine --
4660  17.02.2010
4661  Bochkanov Sergey
4662 *************************************************************************/
4663 void rmatrixlqunpackl(const real_2d_array &a, const ae_int_t m, const ae_int_t n, real_2d_array &l);
4664 
4665 
4666 /*************************************************************************
4667 Partial unpacking of matrix Q from QR decomposition of a complex matrix A.
4668 
4669 COMMERCIAL EDITION OF ALGLIB:
4670 
4671  ! Commercial version of ALGLIB includes two important improvements of
4672  ! this function, which can be used from C++ and C#:
4673  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
4674  ! * multicore support
4675  !
4676  ! Intel MKL gives approximately constant (with respect to number of
4677  ! worker threads) acceleration factor which depends on CPU being used,
4678  ! problem size and "baseline" ALGLIB edition which is used for
4679  ! comparison.
4680  !
4681  ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be:
4682  ! * about 2-3x faster than ALGLIB for C++ without MKL
4683  ! * about 7-10x faster than "pure C#" edition of ALGLIB
4684  ! Difference in performance will be more striking on newer CPU's with
4685  ! support for newer SIMD instructions. Generally, MKL accelerates any
4686  ! problem whose size is at least 128, with best efficiency achieved for
4687  ! N's larger than 512.
4688  !
4689  ! Commercial edition of ALGLIB also supports multithreaded acceleration
4690  ! of this function. We should note that QR decomposition is harder to
4691  ! parallelize than, say, matrix-matrix product - this algorithm has
4692  ! many internal synchronization points which can not be avoided. However
4693  ! parallelism starts to be profitable starting from N=512, achieving
4694  ! near-linear speedup for N=4096 or higher.
4695  !
4696  ! In order to use multicore features you have to:
4697  ! * use commercial version of ALGLIB
4698  ! * call this function with "smp_" prefix, which indicates that
4699  ! multicore code will be used (for multicore support)
4700  !
4701  ! We recommend you to read 'Working with commercial version' section of
4702  ! ALGLIB Reference Manual in order to find out how to use performance-
4703  ! related features provided by commercial edition of ALGLIB.
4704 
4705 Input parameters:
4706  A - matrices Q and R in compact form.
4707  Output of CMatrixQR subroutine .
4708  M - number of rows in matrix A. M>=0.
4709  N - number of columns in matrix A. N>=0.
4710  Tau - scalar factors which are used to form Q.
4711  Output of CMatrixQR subroutine .
4712  QColumns - required number of columns in matrix Q. M>=QColumns>=0.
4713 
4714 Output parameters:
4715  Q - first QColumns columns of matrix Q.
4716  Array whose index ranges within [0..M-1, 0..QColumns-1].
4717  If QColumns=0, array isn't changed.
4718 
4719  -- ALGLIB routine --
4720  17.02.2010
4721  Bochkanov Sergey
4722 *************************************************************************/
4723 void cmatrixqrunpackq(const complex_2d_array &a, const ae_int_t m, const ae_int_t n, const complex_1d_array &tau, const ae_int_t qcolumns, complex_2d_array &q);
4724 void smp_cmatrixqrunpackq(const complex_2d_array &a, const ae_int_t m, const ae_int_t n, const complex_1d_array &tau, const ae_int_t qcolumns, complex_2d_array &q);
4725 
4726 
4727 /*************************************************************************
4728 Unpacking of matrix R from the QR decomposition of a matrix A
4729 
4730 Input parameters:
4731  A - matrices Q and R in compact form.
4732  Output of CMatrixQR subroutine.
4733  M - number of rows in given matrix A. M>=0.
4734  N - number of columns in given matrix A. N>=0.
4735 
4736 Output parameters:
4737  R - matrix R, array[0..M-1, 0..N-1].
4738 
4739  -- ALGLIB routine --
4740  17.02.2010
4741  Bochkanov Sergey
4742 *************************************************************************/
4743 void cmatrixqrunpackr(const complex_2d_array &a, const ae_int_t m, const ae_int_t n, complex_2d_array &r);
4744 
4745 
4746 /*************************************************************************
4747 Partial unpacking of matrix Q from LQ decomposition of a complex matrix A.
4748 
4749 COMMERCIAL EDITION OF ALGLIB:
4750 
4751  ! Commercial version of ALGLIB includes two important improvements of
4752  ! this function, which can be used from C++ and C#:
4753  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
4754  ! * multicore support
4755  !
4756  ! Intel MKL gives approximately constant (with respect to number of
4757  ! worker threads) acceleration factor which depends on CPU being used,
4758  ! problem size and "baseline" ALGLIB edition which is used for
4759  ! comparison.
4760  !
4761  ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be:
4762  ! * about 2-3x faster than ALGLIB for C++ without MKL
4763  ! * about 7-10x faster than "pure C#" edition of ALGLIB
4764  ! Difference in performance will be more striking on newer CPU's with
4765  ! support for newer SIMD instructions. Generally, MKL accelerates any
4766  ! problem whose size is at least 128, with best efficiency achieved for
4767  ! N's larger than 512.
4768  !
4769  ! Commercial edition of ALGLIB also supports multithreaded acceleration
4770  ! of this function. We should note that LQ decomposition is harder to
4771  ! parallelize than, say, matrix-matrix product - this algorithm has
4772  ! many internal synchronization points which can not be avoided. However
4773  ! parallelism starts to be profitable starting from N=512, achieving
4774  ! near-linear speedup for N=4096 or higher.
4775  !
4776  ! In order to use multicore features you have to:
4777  ! * use commercial version of ALGLIB
4778  ! * call this function with "smp_" prefix, which indicates that
4779  ! multicore code will be used (for multicore support)
4780  !
4781  ! We recommend you to read 'Working with commercial version' section of
4782  ! ALGLIB Reference Manual in order to find out how to use performance-
4783  ! related features provided by commercial edition of ALGLIB.
4784 
4785 Input parameters:
4786  A - matrices Q and R in compact form.
4787  Output of CMatrixLQ subroutine .
4788  M - number of rows in matrix A. M>=0.
4789  N - number of columns in matrix A. N>=0.
4790  Tau - scalar factors which are used to form Q.
4791  Output of CMatrixLQ subroutine .
4792  QRows - required number of rows in matrix Q. N>=QRows>=0.
4793 
4794 Output parameters:
4795  Q - first QRows rows of matrix Q.
4796  Array whose index ranges within [0..QRows-1, 0..N-1].
4797  If QRows=0, array isn't changed.
4798 
4799  -- ALGLIB routine --
4800  17.02.2010
4801  Bochkanov Sergey
4802 *************************************************************************/
4803 void cmatrixlqunpackq(const complex_2d_array &a, const ae_int_t m, const ae_int_t n, const complex_1d_array &tau, const ae_int_t qrows, complex_2d_array &q);
4804 void smp_cmatrixlqunpackq(const complex_2d_array &a, const ae_int_t m, const ae_int_t n, const complex_1d_array &tau, const ae_int_t qrows, complex_2d_array &q);
4805 
4806 
4807 /*************************************************************************
4808 Unpacking of matrix L from the LQ decomposition of a matrix A
4809 
4810 Input parameters:
4811  A - matrices Q and L in compact form.
4812  Output of CMatrixLQ subroutine.
4813  M - number of rows in given matrix A. M>=0.
4814  N - number of columns in given matrix A. N>=0.
4815 
4816 Output parameters:
4817  L - matrix L, array[0..M-1, 0..N-1].
4818 
4819  -- ALGLIB routine --
4820  17.02.2010
4821  Bochkanov Sergey
4822 *************************************************************************/
4823 void cmatrixlqunpackl(const complex_2d_array &a, const ae_int_t m, const ae_int_t n, complex_2d_array &l);
4824 
4825 
4826 /*************************************************************************
4827 Reduction of a rectangular matrix to bidiagonal form
4828 
4829 The algorithm reduces the rectangular matrix A to bidiagonal form by
4830 orthogonal transformations P and Q: A = Q*B*(P^T).
4831 
4832 COMMERCIAL EDITION OF ALGLIB:
4833 
4834  ! Commercial version of ALGLIB includes one important improvement of
4835  ! this function, which can be used from C++ and C#:
4836  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
4837  !
4838  ! Intel MKL gives approximately constant (with respect to number of
4839  ! worker threads) acceleration factor which depends on CPU being used,
4840  ! problem size and "baseline" ALGLIB edition which is used for
4841  ! comparison.
4842  !
4843  ! Multithreaded acceleration is NOT supported for this function because
4844  ! bidiagonal decomposition is inherently sequential in nature.
4845  !
4846  ! We recommend you to read 'Working with commercial version' section of
4847  ! ALGLIB Reference Manual in order to find out how to use performance-
4848  ! related features provided by commercial edition of ALGLIB.
4849 
4850 Input parameters:
4851  A - source matrix. array[0..M-1, 0..N-1]
4852  M - number of rows in matrix A.
4853  N - number of columns in matrix A.
4854 
4855 Output parameters:
4856  A - matrices Q, B, P in compact form (see below).
4857  TauQ - scalar factors which are used to form matrix Q.
4858  TauP - scalar factors which are used to form matrix P.
4859 
4860 The main diagonal and one of the secondary diagonals of matrix A are
4861 replaced with bidiagonal matrix B. Other elements contain elementary
4862 reflections which form MxM matrix Q and NxN matrix P, respectively.
4863 
4864 If M>=N, B is the upper bidiagonal MxN matrix and is stored in the
4865 corresponding elements of matrix A. Matrix Q is represented as a
4866 product of elementary reflections Q = H(0)*H(1)*...*H(n-1), where
4867 H(i) = 1-tau*v*v'. Here tau is a scalar which is stored in TauQ[i], and
4868 vector v has the following structure: v(0:i-1)=0, v(i)=1, v(i+1:m-1) is
4869 stored in elements A(i+1:m-1,i). Matrix P is as follows: P =
4870 G(0)*G(1)*...*G(n-2), where G(i) = 1 - tau*u*u'. Tau is stored in TauP[i],
4871 u(0:i)=0, u(i+1)=1, u(i+2:n-1) is stored in elements A(i,i+2:n-1).
4872 
4873 If M<N, B is the lower bidiagonal MxN matrix and is stored in the
4874 corresponding elements of matrix A. Q = H(0)*H(1)*...*H(m-2), where
4875 H(i) = 1 - tau*v*v', tau is stored in TauQ, v(0:i)=0, v(i+1)=1, v(i+2:m-1)
4876 is stored in elements A(i+2:m-1,i). P = G(0)*G(1)*...*G(m-1),
4877 G(i) = 1-tau*u*u', tau is stored in TauP, u(0:i-1)=0, u(i)=1, u(i+1:n-1)
4878 is stored in A(i,i+1:n-1).
4879 
4880 EXAMPLE:
4881 
4882 m=6, n=5 (m > n): m=5, n=6 (m < n):
4883 
4884 ( d e u1 u1 u1 ) ( d u1 u1 u1 u1 u1 )
4885 ( v1 d e u2 u2 ) ( e d u2 u2 u2 u2 )
4886 ( v1 v2 d e u3 ) ( v1 e d u3 u3 u3 )
4887 ( v1 v2 v3 d e ) ( v1 v2 e d u4 u4 )
4888 ( v1 v2 v3 v4 d ) ( v1 v2 v3 e d u5 )
4889 ( v1 v2 v3 v4 v5 )
4890 
4891 Here vi and ui are vectors which form H(i) and G(i), and d and e -
4892 are the diagonal and off-diagonal elements of matrix B.
4893 
4894  -- LAPACK routine (version 3.0) --
4895  Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
4896  Courant Institute, Argonne National Lab, and Rice University
4897  September 30, 1994.
4898  Sergey Bochkanov, ALGLIB project, translation from FORTRAN to
4899  pseudocode, 2007-2010.
4900 *************************************************************************/
4901 void rmatrixbd(real_2d_array &a, const ae_int_t m, const ae_int_t n, real_1d_array &tauq, real_1d_array &taup);
4902 
4903 
4904 /*************************************************************************
4905 Unpacking matrix Q which reduces a matrix to bidiagonal form.
4906 
4907 COMMERCIAL EDITION OF ALGLIB:
4908 
4909  ! Commercial version of ALGLIB includes one important improvement of
4910  ! this function, which can be used from C++ and C#:
4911  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
4912  !
4913  ! Intel MKL gives approximately constant (with respect to number of
4914  ! worker threads) acceleration factor which depends on CPU being used,
4915  ! problem size and "baseline" ALGLIB edition which is used for
4916  ! comparison.
4917  !
4918  ! Multithreaded acceleration is NOT supported for this function.
4919  !
4920  ! We recommend you to read 'Working with commercial version' section of
4921  ! ALGLIB Reference Manual in order to find out how to use performance-
4922  ! related features provided by commercial edition of ALGLIB.
4923 
4924 Input parameters:
4925  QP - matrices Q and P in compact form.
4926  Output of ToBidiagonal subroutine.
4927  M - number of rows in matrix A.
4928  N - number of columns in matrix A.
4929  TAUQ - scalar factors which are used to form Q.
4930  Output of ToBidiagonal subroutine.
4931  QColumns - required number of columns in matrix Q.
4932  M>=QColumns>=0.
4933 
4934 Output parameters:
4935  Q - first QColumns columns of matrix Q.
4936  Array[0..M-1, 0..QColumns-1]
4937  If QColumns=0, the array is not modified.
4938 
4939  -- ALGLIB --
4940  2005-2010
4941  Bochkanov Sergey
4942 *************************************************************************/
4943 void rmatrixbdunpackq(const real_2d_array &qp, const ae_int_t m, const ae_int_t n, const real_1d_array &tauq, const ae_int_t qcolumns, real_2d_array &q);
4944 
4945 
4946 /*************************************************************************
4947 Multiplication by matrix Q which reduces matrix A to bidiagonal form.
4948 
4949 The algorithm allows pre- or post-multiply by Q or Q'.
4950 
4951 COMMERCIAL EDITION OF ALGLIB:
4952 
4953  ! Commercial version of ALGLIB includes one important improvement of
4954  ! this function, which can be used from C++ and C#:
4955  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
4956  !
4957  ! Intel MKL gives approximately constant (with respect to number of
4958  ! worker threads) acceleration factor which depends on CPU being used,
4959  ! problem size and "baseline" ALGLIB edition which is used for
4960  ! comparison.
4961  !
4962  ! Multithreaded acceleration is NOT supported for this function.
4963  !
4964  ! We recommend you to read 'Working with commercial version' section of
4965  ! ALGLIB Reference Manual in order to find out how to use performance-
4966  ! related features provided by commercial edition of ALGLIB.
4967 
4968 Input parameters:
4969  QP - matrices Q and P in compact form.
4970  Output of ToBidiagonal subroutine.
4971  M - number of rows in matrix A.
4972  N - number of columns in matrix A.
4973  TAUQ - scalar factors which are used to form Q.
4974  Output of ToBidiagonal subroutine.
4975  Z - multiplied matrix.
4976  array[0..ZRows-1,0..ZColumns-1]
4977  ZRows - number of rows in matrix Z. If FromTheRight=False,
4978  ZRows=M, otherwise ZRows can be arbitrary.
4979  ZColumns - number of columns in matrix Z. If FromTheRight=True,
4980  ZColumns=M, otherwise ZColumns can be arbitrary.
4981  FromTheRight - pre- or post-multiply.
4982  DoTranspose - multiply by Q or Q'.
4983 
4984 Output parameters:
4985  Z - product of Z and Q.
4986  Array[0..ZRows-1,0..ZColumns-1]
4987  If ZRows=0 or ZColumns=0, the array is not modified.
4988 
4989  -- ALGLIB --
4990  2005-2010
4991  Bochkanov Sergey
4992 *************************************************************************/
4993 void rmatrixbdmultiplybyq(const real_2d_array &qp, const ae_int_t m, const ae_int_t n, const real_1d_array &tauq, real_2d_array &z, const ae_int_t zrows, const ae_int_t zcolumns, const bool fromtheright, const bool dotranspose);
4994 
4995 
4996 /*************************************************************************
4997 Unpacking matrix P which reduces matrix A to bidiagonal form.
4998 The subroutine returns transposed matrix P.
4999 
5000 Input parameters:
5001  QP - matrices Q and P in compact form.
5002  Output of ToBidiagonal subroutine.
5003  M - number of rows in matrix A.
5004  N - number of columns in matrix A.
5005  TAUP - scalar factors which are used to form P.
5006  Output of ToBidiagonal subroutine.
5007  PTRows - required number of rows of matrix P^T. N >= PTRows >= 0.
5008 
5009 Output parameters:
5010  PT - first PTRows columns of matrix P^T
5011  Array[0..PTRows-1, 0..N-1]
5012  If PTRows=0, the array is not modified.
5013 
5014  -- ALGLIB --
5015  2005-2010
5016  Bochkanov Sergey
5017 *************************************************************************/
5018 void rmatrixbdunpackpt(const real_2d_array &qp, const ae_int_t m, const ae_int_t n, const real_1d_array &taup, const ae_int_t ptrows, real_2d_array &pt);
5019 
5020 
5021 /*************************************************************************
5022 Multiplication by matrix P which reduces matrix A to bidiagonal form.
5023 
5024 The algorithm allows pre- or post-multiply by P or P'.
5025 
5026 Input parameters:
5027  QP - matrices Q and P in compact form.
5028  Output of RMatrixBD subroutine.
5029  M - number of rows in matrix A.
5030  N - number of columns in matrix A.
5031  TAUP - scalar factors which are used to form P.
5032  Output of RMatrixBD subroutine.
5033  Z - multiplied matrix.
5034  Array whose indexes range within [0..ZRows-1,0..ZColumns-1].
5035  ZRows - number of rows in matrix Z. If FromTheRight=False,
5036  ZRows=N, otherwise ZRows can be arbitrary.
5037  ZColumns - number of columns in matrix Z. If FromTheRight=True,
5038  ZColumns=N, otherwise ZColumns can be arbitrary.
5039  FromTheRight - pre- or post-multiply.
5040  DoTranspose - multiply by P or P'.
5041 
5042 Output parameters:
5043  Z - product of Z and P.
5044  Array whose indexes range within [0..ZRows-1,0..ZColumns-1].
5045  If ZRows=0 or ZColumns=0, the array is not modified.
5046 
5047  -- ALGLIB --
5048  2005-2010
5049  Bochkanov Sergey
5050 *************************************************************************/
5051 void rmatrixbdmultiplybyp(const real_2d_array &qp, const ae_int_t m, const ae_int_t n, const real_1d_array &taup, real_2d_array &z, const ae_int_t zrows, const ae_int_t zcolumns, const bool fromtheright, const bool dotranspose);
5052 
5053 
5054 /*************************************************************************
5055 Unpacking of the main and secondary diagonals of bidiagonal decomposition
5056 of matrix A.
5057 
5058 Input parameters:
5059  B - output of RMatrixBD subroutine.
5060  M - number of rows in matrix B.
5061  N - number of columns in matrix B.
5062 
5063 Output parameters:
5064  IsUpper - True, if the matrix is upper bidiagonal.
5065  otherwise IsUpper is False.
5066  D - the main diagonal.
5067  Array whose index ranges within [0..Min(M,N)-1].
5068  E - the secondary diagonal (upper or lower, depending on
5069  the value of IsUpper).
5070  Array index ranges within [0..Min(M,N)-1], the last
5071  element is not used.
5072 
5073  -- ALGLIB --
5074  2005-2010
5075  Bochkanov Sergey
5076 *************************************************************************/
5077 void rmatrixbdunpackdiagonals(const real_2d_array &b, const ae_int_t m, const ae_int_t n, bool &isupper, real_1d_array &d, real_1d_array &e);
5078 
5079 
5080 /*************************************************************************
5081 Reduction of a square matrix to upper Hessenberg form: Q'*A*Q = H,
5082 where Q is an orthogonal matrix, H - Hessenberg matrix.
5083 
5084 COMMERCIAL EDITION OF ALGLIB:
5085 
5086  ! Commercial version of ALGLIB includes one important improvement of
5087  ! this function, which can be used from C++ and C#:
5088  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
5089  !
5090  ! Intel MKL gives approximately constant (with respect to number of
5091  ! worker threads) acceleration factor which depends on CPU being used,
5092  ! problem size and "baseline" ALGLIB edition which is used for
5093  ! comparison.
5094  !
5095  ! Generally, commercial ALGLIB is several times faster than open-source
5096  ! generic C edition, and many times faster than open-source C# edition.
5097  !
5098  ! Multithreaded acceleration is NOT supported for this function.
5099  !
5100  ! We recommend you to read 'Working with commercial version' section of
5101  ! ALGLIB Reference Manual in order to find out how to use performance-
5102  ! related features provided by commercial edition of ALGLIB.
5103 
5104 Input parameters:
5105  A - matrix A with elements [0..N-1, 0..N-1]
5106  N - size of matrix A.
5107 
5108 Output parameters:
5109  A - matrices Q and P in compact form (see below).
5110  Tau - array of scalar factors which are used to form matrix Q.
5111  Array whose index ranges within [0..N-2]
5112 
5113 Matrix H is located on the main diagonal, on the lower secondary diagonal
5114 and above the main diagonal of matrix A. The elements which are used to
5115 form matrix Q are situated in array Tau and below the lower secondary
5116 diagonal of matrix A as follows:
5117 
5118 Matrix Q is represented as a product of elementary reflections
5119 
5120 Q = H(0)*H(1)*...*H(n-2),
5121 
5122 where each H(i) is given by
5123 
5124 H(i) = 1 - tau * v * (v^T)
5125 
5126 where tau is a scalar stored in Tau[I]; v - is a real vector,
5127 so that v(0:i) = 0, v(i+1) = 1, v(i+2:n-1) stored in A(i+2:n-1,i).
5128 
5129  -- LAPACK routine (version 3.0) --
5130  Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
5131  Courant Institute, Argonne National Lab, and Rice University
5132  October 31, 1992
5133 *************************************************************************/
5134 void rmatrixhessenberg(real_2d_array &a, const ae_int_t n, real_1d_array &tau);
5135 
5136 
5137 /*************************************************************************
5138 Unpacking matrix Q which reduces matrix A to upper Hessenberg form
5139 
5140 COMMERCIAL EDITION OF ALGLIB:
5141 
5142  ! Commercial version of ALGLIB includes one important improvement of
5143  ! this function, which can be used from C++ and C#:
5144  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
5145  !
5146  ! Intel MKL gives approximately constant (with respect to number of
5147  ! worker threads) acceleration factor which depends on CPU being used,
5148  ! problem size and "baseline" ALGLIB edition which is used for
5149  ! comparison.
5150  !
5151  ! Generally, commercial ALGLIB is several times faster than open-source
5152  ! generic C edition, and many times faster than open-source C# edition.
5153  !
5154  ! Multithreaded acceleration is NOT supported for this function.
5155  !
5156  ! We recommend you to read 'Working with commercial version' section of
5157  ! ALGLIB Reference Manual in order to find out how to use performance-
5158  ! related features provided by commercial edition of ALGLIB.
5159 
5160 Input parameters:
5161  A - output of RMatrixHessenberg subroutine.
5162  N - size of matrix A.
5163  Tau - scalar factors which are used to form Q.
5164  Output of RMatrixHessenberg subroutine.
5165 
5166 Output parameters:
5167  Q - matrix Q.
5168  Array whose indexes range within [0..N-1, 0..N-1].
5169 
5170  -- ALGLIB --
5171  2005-2010
5172  Bochkanov Sergey
5173 *************************************************************************/
5174 void rmatrixhessenbergunpackq(const real_2d_array &a, const ae_int_t n, const real_1d_array &tau, real_2d_array &q);
5175 
5176 
5177 /*************************************************************************
5178 Unpacking matrix H (the result of matrix A reduction to upper Hessenberg form)
5179 
5180 Input parameters:
5181  A - output of RMatrixHessenberg subroutine.
5182  N - size of matrix A.
5183 
5184 Output parameters:
5185  H - matrix H. Array whose indexes range within [0..N-1, 0..N-1].
5186 
5187  -- ALGLIB --
5188  2005-2010
5189  Bochkanov Sergey
5190 *************************************************************************/
5191 void rmatrixhessenbergunpackh(const real_2d_array &a, const ae_int_t n, real_2d_array &h);
5192 
5193 
5194 /*************************************************************************
5195 Reduction of a symmetric matrix which is given by its higher or lower
5196 triangular part to a tridiagonal matrix using orthogonal similarity
5197 transformation: Q'*A*Q=T.
5198 
5199 COMMERCIAL EDITION OF ALGLIB:
5200 
5201  ! Commercial version of ALGLIB includes one important improvement of
5202  ! this function, which can be used from C++ and C#:
5203  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
5204  !
5205  ! Intel MKL gives approximately constant (with respect to number of
5206  ! worker threads) acceleration factor which depends on CPU being used,
5207  ! problem size and "baseline" ALGLIB edition which is used for
5208  ! comparison.
5209  !
5210  ! Generally, commercial ALGLIB is several times faster than open-source
5211  ! generic C edition, and many times faster than open-source C# edition.
5212  !
5213  ! Multithreaded acceleration is NOT supported for this function.
5214  !
5215  ! We recommend you to read 'Working with commercial version' section of
5216  ! ALGLIB Reference Manual in order to find out how to use performance-
5217  ! related features provided by commercial edition of ALGLIB.
5218 
5219 Input parameters:
5220  A - matrix to be transformed
5221  array with elements [0..N-1, 0..N-1].
5222  N - size of matrix A.
5223  IsUpper - storage format. If IsUpper = True, then matrix A is given
5224  by its upper triangle, and the lower triangle is not used
5225  and not modified by the algorithm, and vice versa
5226  if IsUpper = False.
5227 
5228 Output parameters:
5229  A - matrices T and Q in compact form (see lower)
5230  Tau - array of factors which are forming matrices H(i)
5231  array with elements [0..N-2].
5232  D - main diagonal of symmetric matrix T.
5233  array with elements [0..N-1].
5234  E - secondary diagonal of symmetric matrix T.
5235  array with elements [0..N-2].
5236 
5237 
5238  If IsUpper=True, the matrix Q is represented as a product of elementary
5239  reflectors
5240 
5241  Q = H(n-2) . . . H(1) H(0).
5242 
5243  Each H(i) has the form
5244 
5245  H(i) = I - tau * v * v'
5246 
5247  where tau is a real scalar, and v is a real vector with
5248  v(i+1:n-1) = 0, v(i) = 1, v(0:i-1) is stored on exit in
5249  A(0:i-1,i+1), and tau in TAU(i).
5250 
5251  If IsUpper=False, the matrix Q is represented as a product of elementary
5252  reflectors
5253 
5254  Q = H(0) H(1) . . . H(n-2).
5255 
5256  Each H(i) has the form
5257 
5258  H(i) = I - tau * v * v'
5259 
5260  where tau is a real scalar, and v is a real vector with
5261  v(0:i) = 0, v(i+1) = 1, v(i+2:n-1) is stored on exit in A(i+2:n-1,i),
5262  and tau in TAU(i).
5263 
5264  The contents of A on exit are illustrated by the following examples
5265  with n = 5:
5266 
5267  if UPLO = 'U': if UPLO = 'L':
5268 
5269  ( d e v1 v2 v3 ) ( d )
5270  ( d e v2 v3 ) ( e d )
5271  ( d e v3 ) ( v0 e d )
5272  ( d e ) ( v0 v1 e d )
5273  ( d ) ( v0 v1 v2 e d )
5274 
5275  where d and e denote diagonal and off-diagonal elements of T, and vi
5276  denotes an element of the vector defining H(i).
5277 
5278  -- LAPACK routine (version 3.0) --
5279  Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
5280  Courant Institute, Argonne National Lab, and Rice University
5281  October 31, 1992
5282 *************************************************************************/
5283 void smatrixtd(real_2d_array &a, const ae_int_t n, const bool isupper, real_1d_array &tau, real_1d_array &d, real_1d_array &e);
5284 
5285 
5286 /*************************************************************************
5287 Unpacking matrix Q which reduces symmetric matrix to a tridiagonal
5288 form.
5289 
5290 
5291 COMMERCIAL EDITION OF ALGLIB:
5292 
5293  ! Commercial version of ALGLIB includes one important improvement of
5294  ! this function, which can be used from C++ and C#:
5295  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
5296  !
5297  ! Intel MKL gives approximately constant (with respect to number of
5298  ! worker threads) acceleration factor which depends on CPU being used,
5299  ! problem size and "baseline" ALGLIB edition which is used for
5300  ! comparison.
5301  !
5302  ! Generally, commercial ALGLIB is several times faster than open-source
5303  ! generic C edition, and many times faster than open-source C# edition.
5304  !
5305  ! Multithreaded acceleration is NOT supported for this function.
5306  !
5307  ! We recommend you to read 'Working with commercial version' section of
5308  ! ALGLIB Reference Manual in order to find out how to use performance-
5309  ! related features provided by commercial edition of ALGLIB.
5310 
5311 Input parameters:
5312  A - the result of a SMatrixTD subroutine
5313  N - size of matrix A.
5314  IsUpper - storage format (a parameter of SMatrixTD subroutine)
5315  Tau - the result of a SMatrixTD subroutine
5316 
5317 Output parameters:
5318  Q - transformation matrix.
5319  array with elements [0..N-1, 0..N-1].
5320 
5321  -- ALGLIB --
5322  Copyright 2005-2010 by Bochkanov Sergey
5323 *************************************************************************/
5324 void smatrixtdunpackq(const real_2d_array &a, const ae_int_t n, const bool isupper, const real_1d_array &tau, real_2d_array &q); // unpacks orthogonal transformation matrix Q[N,N] from SMatrixTD compact output (A, Tau)
5325 
5326 
5327 /*************************************************************************
5328 Reduction of a Hermitian matrix which is given by its higher or lower
5329 triangular part to a real tridiagonal matrix using unitary similarity
5330 transformation: Q'*A*Q = T.
5331 
5332 
5333 COMMERCIAL EDITION OF ALGLIB:
5334 
5335  ! Commercial version of ALGLIB includes one important improvement of
5336  ! this function, which can be used from C++ and C#:
5337  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
5338  !
5339  ! Intel MKL gives approximately constant (with respect to number of
5340  ! worker threads) acceleration factor which depends on CPU being used,
5341  ! problem size and "baseline" ALGLIB edition which is used for
5342  ! comparison.
5343  !
5344  ! Generally, commercial ALGLIB is several times faster than open-source
5345  ! generic C edition, and many times faster than open-source C# edition.
5346  !
5347  ! Multithreaded acceleration is NOT supported for this function.
5348  !
5349  ! We recommend you to read 'Working with commercial version' section of
5350  ! ALGLIB Reference Manual in order to find out how to use performance-
5351  ! related features provided by commercial edition of ALGLIB.
5352 
5353 Input parameters:
5354  A - matrix to be transformed
5355  array with elements [0..N-1, 0..N-1].
5356  N - size of matrix A.
5357  IsUpper - storage format. If IsUpper = True, then matrix A is given
5358  by its upper triangle, and the lower triangle is not used
5359  and not modified by the algorithm, and vice versa
5360  if IsUpper = False.
5361 
5362 Output parameters:
5363  A - matrices T and Q in compact form (see lower)
5364  Tau - array of factors which are forming matrices H(i)
5365  array with elements [0..N-2].
5366  D - main diagonal of real symmetric matrix T.
5367  array with elements [0..N-1].
5368  E - secondary diagonal of real symmetric matrix T.
5369  array with elements [0..N-2].
5370 
5371 
5372  If IsUpper=True, the matrix Q is represented as a product of elementary
5373  reflectors
5374 
5375  Q = H(n-2) . . . H(1) H(0).
5376 
5377  Each H(i) has the form
5378 
5379  H(i) = I - tau * v * v'
5380 
5381  where tau is a complex scalar, and v is a complex vector with
5382  v(i+1:n-1) = 0, v(i) = 1, v(0:i-1) is stored on exit in
5383  A(0:i-1,i+1), and tau in TAU(i).
5384 
5385  If IsUpper=False, the matrix Q is represented as a product of elementary
5386  reflectors
5387 
5388  Q = H(0) H(1) . . . H(n-2).
5389 
5390  Each H(i) has the form
5391 
5392  H(i) = I - tau * v * v'
5393 
5394  where tau is a complex scalar, and v is a complex vector with
5395  v(0:i) = 0, v(i+1) = 1, v(i+2:n-1) is stored on exit in A(i+2:n-1,i),
5396  and tau in TAU(i).
5397 
5398  The contents of A on exit are illustrated by the following examples
5399  with n = 5:
5400 
5401  if UPLO = 'U': if UPLO = 'L':
5402 
5403  ( d e v1 v2 v3 ) ( d )
5404  ( d e v2 v3 ) ( e d )
5405  ( d e v3 ) ( v0 e d )
5406  ( d e ) ( v0 v1 e d )
5407  ( d ) ( v0 v1 v2 e d )
5408 
5409 where d and e denote diagonal and off-diagonal elements of T, and vi
5410 denotes an element of the vector defining H(i).
5411 
5412  -- LAPACK routine (version 3.0) --
5413  Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
5414  Courant Institute, Argonne National Lab, and Rice University
5415  October 31, 1992
5416 *************************************************************************/
5417 void hmatrixtd(complex_2d_array &a, const ae_int_t n, const bool isupper, complex_1d_array &tau, real_1d_array &d, real_1d_array &e); // Hermitian counterpart of smatrixtd: unitary similarity Q'*A*Q = T with real tridiagonal T
5418 
5419 
5420 /*************************************************************************
5421 Unpacking matrix Q which reduces a Hermitian matrix to a real tridiagonal
5422 form.
5423 
5424 
5425 COMMERCIAL EDITION OF ALGLIB:
5426 
5427  ! Commercial version of ALGLIB includes one important improvement of
5428  ! this function, which can be used from C++ and C#:
5429  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
5430  !
5431  ! Intel MKL gives approximately constant (with respect to number of
5432  ! worker threads) acceleration factor which depends on CPU being used,
5433  ! problem size and "baseline" ALGLIB edition which is used for
5434  ! comparison.
5435  !
5436  ! Generally, commercial ALGLIB is several times faster than open-source
5437  ! generic C edition, and many times faster than open-source C# edition.
5438  !
5439  ! Multithreaded acceleration is NOT supported for this function.
5440  !
5441  ! We recommend you to read 'Working with commercial version' section of
5442  ! ALGLIB Reference Manual in order to find out how to use performance-
5443  ! related features provided by commercial edition of ALGLIB.
5444 
5445 Input parameters:
5446  A - the result of a HMatrixTD subroutine
5447  N - size of matrix A.
5448  IsUpper - storage format (a parameter of HMatrixTD subroutine)
5449  Tau - the result of a HMatrixTD subroutine
5450 
5451 Output parameters:
5452  Q - transformation matrix.
5453  array with elements [0..N-1, 0..N-1].
5454 
5455  -- ALGLIB --
5456  Copyright 2005-2010 by Bochkanov Sergey
5457 *************************************************************************/
5458 void hmatrixtdunpackq(const complex_2d_array &a, const ae_int_t n, const bool isupper, const complex_1d_array &tau, complex_2d_array &q); // unpacks unitary matrix Q[N,N] from HMatrixTD compact output (A, Tau)
5459 
5460 
5461 
5462 /*************************************************************************
5463 Singular value decomposition of a bidiagonal matrix (extended algorithm)
5464 
5465 COMMERCIAL EDITION OF ALGLIB:
5466 
5467  ! Commercial version of ALGLIB includes one important improvement of
5468  ! this function, which can be used from C++ and C#:
5469  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
5470  !
5471  ! Intel MKL gives approximately constant (with respect to number of
5472  ! worker threads) acceleration factor which depends on CPU being used,
5473  ! problem size and "baseline" ALGLIB edition which is used for
5474  ! comparison.
5475  !
5476  ! Generally, commercial ALGLIB is several times faster than open-source
5477  ! generic C edition, and many times faster than open-source C# edition.
5478  !
5479  ! Multithreaded acceleration is NOT supported for this function.
5480  !
5481  ! We recommend you to read 'Working with commercial version' section of
5482  ! ALGLIB Reference Manual in order to find out how to use performance-
5483  ! related features provided by commercial edition of ALGLIB.
5484 
5485 The algorithm performs the singular value decomposition of a bidiagonal
5486 matrix B (upper or lower) representing it as B = Q*S*P^T, where Q and P -
5487 orthogonal matrices, S - diagonal matrix with non-negative elements on the
5488 main diagonal, in descending order.
5489 
5490 The algorithm finds singular values. In addition, the algorithm can
5491 calculate matrices Q and P (more precisely, not the matrices, but their
5492 product with given matrices U and VT - U*Q and (P^T)*VT). Of course,
5493 matrices U and VT can be of any type, including identity. Furthermore, the
5494 algorithm can calculate Q'*C (this product is calculated more effectively
5495 than U*Q, because this calculation operates with rows instead of matrix
5496 columns).
5497 
5498 The feature of the algorithm is its ability to find all singular values
5499 including those which are arbitrarily close to 0 with relative accuracy
5500 close to machine precision. If the parameter IsFractionalAccuracyRequired
5501 is set to True, all singular values will have high relative accuracy close
5502 to machine precision. If the parameter is set to False, only the biggest
5503 singular value will have relative accuracy close to machine precision.
5504 The absolute error of other singular values is equal to the absolute error
5505 of the biggest singular value.
5506 
5507 Input parameters:
5508  D - main diagonal of matrix B.
5509  Array whose index ranges within [0..N-1].
5510  E - superdiagonal (or subdiagonal) of matrix B.
5511  Array whose index ranges within [0..N-2].
5512  N - size of matrix B.
5513  IsUpper - True, if the matrix is upper bidiagonal.
5514  IsFractionalAccuracyRequired -
5515  THIS PARAMETER IS IGNORED SINCE ALGLIB 3.5.0
5516  SINGULAR VALUES ARE ALWAYS SEARCHED WITH HIGH ACCURACY.
5517  U - matrix to be multiplied by Q.
5518  Array whose indexes range within [0..NRU-1, 0..N-1].
5519  The matrix can be bigger, in that case only the submatrix
5520  [0..NRU-1, 0..N-1] will be multiplied by Q.
5521  NRU - number of rows in matrix U.
5522  C - matrix to be multiplied by Q'.
5523  Array whose indexes range within [0..N-1, 0..NCC-1].
5524  The matrix can be bigger, in that case only the submatrix
5525  [0..N-1, 0..NCC-1] will be multiplied by Q'.
5526  NCC - number of columns in matrix C.
5527  VT - matrix to be multiplied by P^T.
5528  Array whose indexes range within [0..N-1, 0..NCVT-1].
5529  The matrix can be bigger, in that case only the submatrix
5530  [0..N-1, 0..NCVT-1] will be multiplied by P^T.
5531  NCVT - number of columns in matrix VT.
5532 
5533 Output parameters:
5534  D - singular values of matrix B in descending order.
5535  U - if NRU>0, contains matrix U*Q.
5536  VT - if NCVT>0, contains matrix (P^T)*VT.
5537  C - if NCC>0, contains matrix Q'*C.
5538 
5539 Result:
5540  True, if the algorithm has converged.
5541  False, if the algorithm hasn't converged (rare case).
5542 
5543 NOTE: multiplication U*Q is performed by means of transposition to internal
5544  buffer, multiplication and backward transposition. It helps to avoid
5545  costly columnwise operations and speed-up algorithm.
5546 
5547 Additional information:
5548  The type of convergence is controlled by the internal parameter TOL.
5549  If the parameter is greater than 0, the singular values will have
5550  relative accuracy TOL. If TOL<0, the singular values will have
5551  absolute accuracy ABS(TOL)*norm(B).
5552  By default, |TOL| falls within the range of 10*Epsilon and 100*Epsilon,
5553  where Epsilon is the machine precision. It is not recommended to use
5554  TOL less than 10*Epsilon since this will considerably slow down the
5555  algorithm and may not lead to error decreasing.
5556 
5557 History:
5558  * 31 March, 2007.
5559  changed MAXITR from 6 to 12.
5560 
5561  -- LAPACK routine (version 3.0) --
5562  Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
5563  Courant Institute, Argonne National Lab, and Rice University
5564  October 31, 1999.
5565 *************************************************************************/
5566 bool rmatrixbdsvd(real_1d_array &d, const real_1d_array &e, const ae_int_t n, const bool isupper, const bool isfractionalaccuracyrequired, real_2d_array &u, const ae_int_t nru, real_2d_array &c, const ae_int_t ncc, real_2d_array &vt, const ae_int_t ncvt); // bidiagonal SVD; returns true on convergence, D receives singular values in descending order
5567 
5568 /*************************************************************************
5569 Singular value decomposition of a rectangular matrix.
5570 
5571 COMMERCIAL EDITION OF ALGLIB:
5572 
5573  ! Commercial version of ALGLIB includes one important improvement of
5574  ! this function, which can be used from C++ and C#:
5575  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
5576  !
5577  ! Intel MKL gives approximately constant (with respect to number of
5578  ! worker threads) acceleration factor which depends on CPU being used,
5579  ! problem size and "baseline" ALGLIB edition which is used for
5580  ! comparison.
5581  !
5582  ! Generally, commercial ALGLIB is several times faster than open-source
5583  ! generic C edition, and many times faster than open-source C# edition.
5584  !
5585  ! Multithreaded acceleration is only partially supported (some parts are
5586  ! optimized, but most - are not).
5587  !
5588  ! We recommend you to read 'Working with commercial version' section of
5589  ! ALGLIB Reference Manual in order to find out how to use performance-
5590  ! related features provided by commercial edition of ALGLIB.
5591 
5592 The algorithm calculates the singular value decomposition of a matrix of
5593 size MxN: A = U * S * V^T
5594 
5595 The algorithm finds the singular values and, optionally, matrices U and V^T.
5596 The algorithm can find both first min(M,N) columns of matrix U and rows of
5597 matrix V^T (singular vectors), and matrices U and V^T wholly (of sizes MxM
5598 and NxN respectively).
5599 
5600 Take into account that the subroutine does not return matrix V but V^T.
5601 
5602 Input parameters:
5603  A - matrix to be decomposed.
5604  Array whose indexes range within [0..M-1, 0..N-1].
5605  M - number of rows in matrix A.
5606  N - number of columns in matrix A.
5607  UNeeded - 0, 1 or 2. See the description of the parameter U.
5608  VTNeeded - 0, 1 or 2. See the description of the parameter VT.
5609  AdditionalMemory -
5610  If the parameter:
5611  * equals 0, the algorithm doesn't use additional
5612  memory (lower requirements, lower performance).
5613  * equals 1, the algorithm uses additional
5614  memory of size min(M,N)*min(M,N) of real numbers.
5615  It often speeds up the algorithm.
5616  * equals 2, the algorithm uses additional
5617  memory of size M*min(M,N) of real numbers.
5618  It allows one to get maximum performance.
5619  The recommended value of the parameter is 2.
5620 
5621 Output parameters:
5622  W - contains singular values in descending order.
5623  U - if UNeeded=0, U isn't changed, the left singular vectors
5624  are not calculated.
5625  if Uneeded=1, U contains left singular vectors (first
5626  min(M,N) columns of matrix U). Array whose indexes range
5627  within [0..M-1, 0..Min(M,N)-1].
5628  if UNeeded=2, U contains matrix U wholly. Array whose
5629  indexes range within [0..M-1, 0..M-1].
5630  VT - if VTNeeded=0, VT isn't changed, the right singular vectors
5631  are not calculated.
5632  if VTNeeded=1, VT contains right singular vectors (first
5633  min(M,N) rows of matrix V^T). Array whose indexes range
5634  within [0..min(M,N)-1, 0..N-1].
5635  if VTNeeded=2, VT contains matrix V^T wholly. Array whose
5636  indexes range within [0..N-1, 0..N-1].
5637 
5638  -- ALGLIB --
5639  Copyright 2005 by Bochkanov Sergey
5640 *************************************************************************/
5641 bool rmatrixsvd(const real_2d_array &a, const ae_int_t m, const ae_int_t n, const ae_int_t uneeded, const ae_int_t vtneeded, const ae_int_t additionalmemory, real_1d_array &w, real_2d_array &u, real_2d_array &vt); // SVD of MxN matrix, A = U*S*V^T; returns a bool status flag (presumably convergence — see Reference Manual)
5642 bool smp_rmatrixsvd(const real_2d_array &a, const ae_int_t m, const ae_int_t n, const ae_int_t uneeded, const ae_int_t vtneeded, const ae_int_t additionalmemory, real_1d_array &w, real_2d_array &u, real_2d_array &vt); // same interface; SMP-capable entry point (multithreading only partially effective per note above)
5643 
5644 /*************************************************************************
5645 This procedure initializes matrix norm estimator.
5646 
5647 USAGE:
5648 1. User initializes algorithm state with NormEstimatorCreate() call
5649 2. User calls NormEstimatorEstimateSparse() (or NormEstimatorIteration())
5650 3. User calls NormEstimatorResults() to get solution.
5651 
5652 INPUT PARAMETERS:
5653  M - number of rows in the matrix being estimated, M>0
5654  N - number of columns in the matrix being estimated, N>0
5655  NStart - number of random starting vectors
5656  recommended value - at least 5.
5657  NIts - number of iterations to do with best starting vector
5658  recommended value - at least 5.
5659 
5660 OUTPUT PARAMETERS:
5661  State - structure which stores algorithm state
5662 
5663 
5664 NOTE: this algorithm is effectively deterministic, i.e. it always returns
5665 same result when repeatedly called for the same matrix. In fact, algorithm
5666 uses randomized starting vectors, but internal random numbers generator
5667 always generates same sequence of the random values (it is a feature, not
5668 bug).
5669 
5670 Algorithm can be made non-deterministic with NormEstimatorSetSeed(0) call.
5671 
5672  -- ALGLIB --
5673  Copyright 06.12.2011 by Bochkanov Sergey
5674 *************************************************************************/
5675 void normestimatorcreate(const ae_int_t m, const ae_int_t n, const ae_int_t nstart, const ae_int_t nits, normestimatorstate &state); // initializes norm estimator for an MxN matrix; NStart/NIts of at least 5 recommended (see above)
5676 
5677 
5678 /*************************************************************************
5679 This function changes seed value used by algorithm. In some cases we need
5680 deterministic processing, i.e. subsequent calls must return equal results,
5681 in other cases we need non-deterministic algorithm which returns different
5682 results for the same matrix on every pass.
5683 
5684 Setting zero seed will lead to non-deterministic algorithm, while non-zero
5685 value will make our algorithm deterministic.
5686 
5687 INPUT PARAMETERS:
5688  State - norm estimator state, must be initialized with a call
5689  to NormEstimatorCreate()
5690  SeedVal - seed value, >=0. Zero value = non-deterministic algo.
5691 
5692  -- ALGLIB --
5693  Copyright 06.12.2011 by Bochkanov Sergey
5694 *************************************************************************/
5695 void normestimatorsetseed(const normestimatorstate &state, const ae_int_t seedval); // seedval=0 => non-deterministic runs; nonzero seed => reproducible results
5696 
5697 
5698 /*************************************************************************
5699 This function estimates norm of the sparse M*N matrix A.
5700 
5701 INPUT PARAMETERS:
5702  State - norm estimator state, must be initialized with a call
5703  to NormEstimatorCreate()
5704  A - sparse M*N matrix, must be converted to CRS format
5705  prior to calling this function.
5706 
5707 After this function is over you can call NormEstimatorResults() to get
5708 estimate of the norm(A).
5709 
5710  -- ALGLIB --
5711  Copyright 06.12.2011 by Bochkanov Sergey
5712 *************************************************************************/
5713 void normestimatorestimatesparse(const normestimatorstate &state, const sparsematrix &a);
5714 
5715 
5716 /*************************************************************************
5717 Matrix norm estimation results
5718 
5719 INPUT PARAMETERS:
5720  State - algorithm state
5721 
5722 OUTPUT PARAMETERS:
5723  Nrm - estimate of the matrix norm, Nrm>=0
5724 
5725  -- ALGLIB --
5726  Copyright 06.12.2011 by Bochkanov Sergey
5727 *************************************************************************/
5728 void normestimatorresults(const normestimatorstate &state, double &nrm); // retrieves the computed estimate Nrm >= 0 after estimation has finished
5729 
5730 /*************************************************************************
5731 This function initializes subspace iteration solver. This solver is used
5732 to solve symmetric real eigenproblems where just a few (top K) eigenvalues
5733 and corresponding eigenvectors are required.
5734 
5735 This solver can be significantly faster than complete EVD decomposition
5736 in the following case:
5737 * when only just a small fraction of top eigenpairs of dense matrix is
5738  required. When K approaches N, this solver is slower than complete dense
5739  EVD
5740 * when problem matrix is sparse (and/or is not known explicitly, i.e. only
5741  matrix-matrix product can be performed)
5742 
5743 USAGE (explicit dense/sparse matrix):
5744 1. User initializes algorithm state with eigsubspacecreate() call
5745 2. [optional] User tunes solver parameters by calling eigsubspacesetcond()
5746  or other functions
5747 3. User calls eigsubspacesolvedense() or eigsubspacesolvesparse() methods,
5748  which take algorithm state and 2D array or alglib.sparsematrix object.
5749 
5750 USAGE (out-of-core mode):
5751 1. User initializes algorithm state with eigsubspacecreate() call
5752 2. [optional] User tunes solver parameters by calling eigsubspacesetcond()
5753  or other functions
5754 3. User activates out-of-core mode of the solver and repeatedly calls
5755  communication functions in a loop like below:
5756  > alglib.eigsubspaceoocstart(state)
5757  > while alglib.eigsubspaceooccontinue(state) do
5758  > alglib.eigsubspaceoocgetrequestinfo(state, out RequestType, out M)
5759  > alglib.eigsubspaceoocgetrequestdata(state, out X)
5760  > [calculate Y=A*X, with X=R^NxM]
5761  > alglib.eigsubspaceoocsendresult(state, in Y)
5762  > alglib.eigsubspaceoocstop(state, out W, out Z, out Report)
5763 
5764 
5765 INPUT PARAMETERS:
5766  N - problem dimensionality, N>0
5767  K - number of top eigenvectors to calculate, 0<K<=N.
5768 
5769 OUTPUT PARAMETERS:
5770  State - structure which stores algorithm state
5771 
5772  -- ALGLIB --
5773  Copyright 16.01.2017 by Bochkanov Sergey
5774 *************************************************************************/
5775 void eigsubspacecreate(const ae_int_t n, const ae_int_t k, eigsubspacestate &state); // creates subspace-iteration solver for the top K eigenpairs of an NxN symmetric problem, 0<K<=N
5776 
5777 
5778 /*************************************************************************
5779 Buffered version of constructor which aims to reuse previously allocated
5780 memory as much as possible.
5781 
5782  -- ALGLIB --
5783  Copyright 16.01.2017 by Bochkanov Sergey
5784 *************************************************************************/
5785 void eigsubspacecreatebuf(const ae_int_t n, const ae_int_t k, const eigsubspacestate &state); // same as eigsubspacecreate(), but reuses memory already allocated inside State
5786 
5787 
5788 /*************************************************************************
5789 This function sets stopping criteria for the solver:
5790 * error in eigenvector/value allowed by solver
5791 * maximum number of iterations to perform
5792 
5793 INPUT PARAMETERS:
5794  State - solver structure
5795  Eps - eps>=0, with non-zero value used to tell solver that
5796  it can stop after all eigenvalues converged with
5797  error roughly proportional to eps*MAX(LAMBDA_MAX),
5798  where LAMBDA_MAX is a maximum eigenvalue.
5799  Zero value means that no check for precision is
5800  performed.
5801  MaxIts - maxits>=0, with non-zero value used to tell solver
5802  that it can stop after maxits steps (no matter how
5803  precise current estimate is)
5804 
5805 NOTE: passing eps=0 and maxits=0 results in automatic selection of
5806  moderate eps as stopping criteria (1.0E-6 in current implementation,
5807  but it may change without notice).
5808 
5809 NOTE: very small values of eps are possible (say, 1.0E-12), although the
5810  larger problem you solve (N and/or K), the harder it is to find
5811  precise eigenvectors because rounding errors tend to accumulate.
5812 
5813 NOTE: passing non-zero eps results in some performance penalty, roughly
5814  equal to 2N*(2K)^2 FLOPs per iteration. These additional computations
5815  are required in order to estimate current error in eigenvalues via
5816  Rayleigh-Ritz process.
5817  Most of this additional time is spent in construction of ~2Kx2K
5818  symmetric subproblem whose eigenvalues are checked with exact
5819  eigensolver.
5820  This additional time is negligible if you search for eigenvalues of
5821  the large dense matrix, but may become noticeable on highly sparse
5822  EVD problems, where cost of matrix-matrix product is low.
5823  If you set eps to exactly zero, Rayleigh-Ritz phase is completely
5824  turned off.
5825 
5826  -- ALGLIB --
5827  Copyright 16.01.2017 by Bochkanov Sergey
5828 *************************************************************************/
5829 void eigsubspacesetcond(const eigsubspacestate &state, const double eps, const ae_int_t maxits); // eps=0 and maxits=0 selects automatic stopping criteria (~1.0E-6); nonzero eps enables Rayleigh-Ritz error checks
5830 
5831 
5832 /*************************************************************************
5833 This function initiates out-of-core mode of subspace eigensolver. It
5834 should be used in conjunction with other out-of-core-related functions of
5835 this subpackage in a loop like below:
5836 
5837 > alglib.eigsubspaceoocstart(state)
5838 > while alglib.eigsubspaceooccontinue(state) do
5839 > alglib.eigsubspaceoocgetrequestinfo(state, out RequestType, out M)
5840 > alglib.eigsubspaceoocgetrequestdata(state, out X)
5841 > [calculate Y=A*X, with X=R^NxM]
5842 > alglib.eigsubspaceoocsendresult(state, in Y)
5843 > alglib.eigsubspaceoocstop(state, out W, out Z, out Report)
5844 
5845 INPUT PARAMETERS:
5846  State - solver object
5847  MType - matrix type:
5848  * 0 for real symmetric matrix (solver assumes that
5849  matrix being processed is symmetric; symmetric
5850  direct eigensolver is used for smaller subproblems
5851  arising during solution of larger "full" task)
5852  Future versions of ALGLIB may introduce support for
5853  other matrix types; for now, only symmetric
5854  eigenproblems are supported.
5855 
5856 
5857  -- ALGLIB --
5858  Copyright 16.01.2017 by Bochkanov Sergey
5859 *************************************************************************/
5860 void eigsubspaceoocstart(const eigsubspacestate &state, const ae_int_t mtype); // begins out-of-core (reverse communication) session; mtype=0 is the only supported value (symmetric problem)
5861 
5862 
5863 /*************************************************************************
5864 This function performs subspace iteration in the out-of-core mode. It
5865 should be used in conjunction with other out-of-core-related functions of
5866 this subpackage in a loop like below:
5867 
5868 > alglib.eigsubspaceoocstart(state)
5869 > while alglib.eigsubspaceooccontinue(state) do
5870 > alglib.eigsubspaceoocgetrequestinfo(state, out RequestType, out M)
5871 > alglib.eigsubspaceoocgetrequestdata(state, out X)
5872 > [calculate Y=A*X, with X=R^NxM]
5873 > alglib.eigsubspaceoocsendresult(state, in Y)
5874 > alglib.eigsubspaceoocstop(state, out W, out Z, out Report)
5875 
5876 
5877  -- ALGLIB --
5878  Copyright 16.01.2017 by Bochkanov Sergey
5879 *************************************************************************/
5880 bool eigsubspaceooccontinue(const eigsubspacestate &state);
5881 
5882 
5883 /*************************************************************************
5884 This function is used to retrieve information about out-of-core request
5885 sent by solver to user code: request type (current version of the solver
5886 sends only requests for matrix-matrix products) and request size (size of
5887 the matrices being multiplied).
5888 
5889 This function returns just request metrics; in order to get contents of
5890 the matrices being multiplied, use eigsubspaceoocgetrequestdata().
5891 
5892 It should be used in conjunction with other out-of-core-related functions
5893 of this subpackage in a loop like below:
5894 
5895 > alglib.eigsubspaceoocstart(state)
5896 > while alglib.eigsubspaceooccontinue(state) do
5897 > alglib.eigsubspaceoocgetrequestinfo(state, out RequestType, out M)
5898 > alglib.eigsubspaceoocgetrequestdata(state, out X)
5899 > [calculate Y=A*X, with X=R^NxM]
5900 > alglib.eigsubspaceoocsendresult(state, in Y)
5901 > alglib.eigsubspaceoocstop(state, out W, out Z, out Report)
5902 
5903 INPUT PARAMETERS:
5904  State - solver running in out-of-core mode
5905 
5906 OUTPUT PARAMETERS:
5907  RequestType - type of the request to process:
5908  * 0 - for matrix-matrix product A*X, with A being
5909  NxN matrix whose eigenvalues/vectors are needed,
5910  and X being NxREQUESTSIZE one which is returned
5911  by the eigsubspaceoocgetrequestdata().
5912  RequestSize - size of the X matrix (number of columns), usually
5913  it is several times larger than number of vectors
5914  K requested by user.
5915 
5916 
5917  -- ALGLIB --
5918  Copyright 16.01.2017 by Bochkanov Sergey
5919 *************************************************************************/
5920 void eigsubspaceoocgetrequestinfo(const eigsubspacestate &state, ae_int_t &requesttype, ae_int_t &requestsize); // requesttype=0 => matrix-matrix product A*X requested; requestsize = number of columns of X
5921 
5922 
5923 /*************************************************************************
5924 This function is used to retrieve information about out-of-core request
5925 sent by solver to user code: matrix X (array[N,RequestSize]) which has to
5926 be multiplied by out-of-core matrix A in a product A*X.
5927 
5928 This function returns just request data; in order to get size of the data
5929 prior to processing the request, use eigsubspaceoocgetrequestinfo().
5930 
5931 It should be used in conjunction with other out-of-core-related functions
5932 of this subpackage in a loop like below:
5933 
5934 > alglib.eigsubspaceoocstart(state)
5935 > while alglib.eigsubspaceooccontinue(state) do
5936 > alglib.eigsubspaceoocgetrequestinfo(state, out RequestType, out M)
5937 > alglib.eigsubspaceoocgetrequestdata(state, out X)
5938 > [calculate Y=A*X, with X=R^NxM]
5939 > alglib.eigsubspaceoocsendresult(state, in Y)
5940 > alglib.eigsubspaceoocstop(state, out W, out Z, out Report)
5941 
5942 INPUT PARAMETERS:
5943  State - solver running in out-of-core mode
5944  X - possibly preallocated storage; reallocated if
5945  needed, left unchanged, if large enough to store
5946  request data.
5947 
5948 OUTPUT PARAMETERS:
5949  X - array[N,RequestSize] or larger, leading rectangle
5950  is filled with dense matrix X.
5951 
5952 
5953  -- ALGLIB --
5954  Copyright 16.01.2017 by Bochkanov Sergey
5955 *************************************************************************/
5956 void eigsubspaceoocgetrequestdata(const eigsubspacestate &state, real_2d_array &x);
5957 
5958 
5959 /*************************************************************************
5960 This function is used to send user reply to out-of-core request sent by
5961 solver. Usually it is product A*X for returned by solver matrix X.
5962 
5963 It should be used in conjunction with other out-of-core-related functions
5964 of this subpackage in a loop like below:
5965 
5966 > alglib.eigsubspaceoocstart(state)
5967 > while alglib.eigsubspaceooccontinue(state) do
5968 > alglib.eigsubspaceoocgetrequestinfo(state, out RequestType, out M)
5969 > alglib.eigsubspaceoocgetrequestdata(state, out X)
5970 > [calculate Y=A*X, with X=R^NxM]
5971 > alglib.eigsubspaceoocsendresult(state, in Y)
5972 > alglib.eigsubspaceoocstop(state, out W, out Z, out Report)
5973 
5974 INPUT PARAMETERS:
5975  State - solver running in out-of-core mode
5976  AX - array[N,RequestSize] or larger, leading rectangle
5977  is filled with product A*X.
5978 
5979 
5980  -- ALGLIB --
5981  Copyright 16.01.2017 by Bochkanov Sergey
5982 *************************************************************************/
5983 void eigsubspaceoocsendresult(const eigsubspacestate &state, const real_2d_array &ax);
5984 
5985 
5986 /*************************************************************************
5987 This function finalizes out-of-core mode of subspace eigensolver. It
5988 should be used in conjunction with other out-of-core-related functions of
5989 this subpackage in a loop like below:
5990 
5991 > alglib.eigsubspaceoocstart(state)
5992 > while alglib.eigsubspaceooccontinue(state) do
5993 > alglib.eigsubspaceoocgetrequestinfo(state, out RequestType, out M)
5994 > alglib.eigsubspaceoocgetrequestdata(state, out X)
5995 > [calculate Y=A*X, with X=R^NxM]
5996 > alglib.eigsubspaceoocsendresult(state, in Y)
5997 > alglib.eigsubspaceoocstop(state, out W, out Z, out Report)
5998 
5999 INPUT PARAMETERS:
6000  State - solver state
6001 
6002 OUTPUT PARAMETERS:
6003  W - array[K], depending on solver settings:
6004  * top K eigenvalues ordered by descending - if
6005  eigenvectors are returned in Z
6006  * zeros - if invariant subspace is returned in Z
6007  Z - array[N,K], depending on solver settings either:
6008  * matrix of eigenvectors found
6009  * orthogonal basis of K-dimensional invariant subspace
6010  Rep - report with additional parameters
6011 
6012  -- ALGLIB --
6013  Copyright 16.01.2017 by Bochkanov Sergey
6014 *************************************************************************/
6016 
6017 
6018 /*************************************************************************
6019 This function runs eigensolver for dense NxN symmetric matrix A, given by
6020 upper or lower triangle.
6021 
6022 This function can not process nonsymmetric matrices.
6023 
6024 COMMERCIAL EDITION OF ALGLIB:
6025 
6026  ! Commercial version of ALGLIB includes two important improvements of
6027  ! this function, which can be used from C++ and C#:
6028  ! * multithreading support
6029  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
6030  !
6031  ! For a situation when you need just a few eigenvectors (~1-10),
6032  ! multithreading typically gives sublinear (wrt to cores count) speedup.
6033  ! For larger problems it may give you nearly linear increase in
6034  ! performance.
6035  !
6036  ! Intel MKL gives approximately constant (with respect to number of
6037  ! worker threads) acceleration factor which depends on CPU being used,
6038  ! problem size and "baseline" ALGLIB edition which is used for
6039  ! comparison. Best results are achieved for high-dimensional problems
6040  ! (NVars is at least 256).
6041  !
6042  ! We recommend you to read 'Working with commercial version' section of
6043  ! ALGLIB Reference Manual in order to find out how to use performance-
6044  ! related features provided by commercial edition of ALGLIB.
6045 
6046 INPUT PARAMETERS:
6047  State - solver state
6048  A - array[N,N], symmetric NxN matrix given by one of its
6049  triangles
6050  IsUpper - whether upper or lower triangle of A is given (the
6051  other one is not referenced at all).
6052 
6053 OUTPUT PARAMETERS:
6054  W - array[K], top K eigenvalues ordered by descending
6055  of their absolute values
6056  Z - array[N,K], matrix of eigenvectors found
6057  Rep - report with additional parameters
6058 
6059 NOTE: internally this function allocates a copy of NxN dense A. You should
6060  take it into account when working with very large matrices occupying
6061  almost all RAM.
6062 
6063  -- ALGLIB --
6064  Copyright 16.01.2017 by Bochkanov Sergey
6065 *************************************************************************/
6066 void eigsubspacesolvedenses(const eigsubspacestate &state, const real_2d_array &a, const bool isupper, real_1d_array &w, real_2d_array &z, eigsubspacereport &rep);
6067 void smp_eigsubspacesolvedenses(const eigsubspacestate &state, const real_2d_array &a, const bool isupper, real_1d_array &w, real_2d_array &z, eigsubspacereport &rep);
6068 
6069 
6070 /*************************************************************************
6071 This function runs eigensolver for dense NxN symmetric matrix A, given by
6072 upper or lower triangle.
6073 
6074 This function can not process nonsymmetric matrices.
6075 
6076 INPUT PARAMETERS:
6077  State - solver state
6078  A - NxN symmetric matrix given by one of its triangles
6079  IsUpper - whether upper or lower triangle of A is given (the
6080  other one is not referenced at all).
6081 
6082 OUTPUT PARAMETERS:
6083  W - array[K], top K eigenvalues ordered by descending
6084  of their absolute values
6085  Z - array[N,K], matrix of eigenvectors found
6086  Rep - report with additional parameters
6087 
6088  -- ALGLIB --
6089  Copyright 16.01.2017 by Bochkanov Sergey
6090 *************************************************************************/
6091 void eigsubspacesolvesparses(const eigsubspacestate &state, const sparsematrix &a, const bool isupper, real_1d_array &w, real_2d_array &z, eigsubspacereport &rep);
6092 
6093 
6094 /*************************************************************************
6095 Finding the eigenvalues and eigenvectors of a symmetric matrix
6096 
6097 The algorithm finds eigen pairs of a symmetric matrix by reducing it to
6098 tridiagonal form and using the QL/QR algorithm.
6099 
6100 COMMERCIAL EDITION OF ALGLIB:
6101 
6102  ! Commercial version of ALGLIB includes one important improvement of
6103  ! this function, which can be used from C++ and C#:
6104  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
6105  !
6106  ! Intel MKL gives approximately constant (with respect to number of
6107  ! worker threads) acceleration factor which depends on CPU being used,
6108  ! problem size and "baseline" ALGLIB edition which is used for
6109  ! comparison.
6110  !
6111  ! Generally, commercial ALGLIB is several times faster than open-source
6112  ! generic C edition, and many times faster than open-source C# edition.
6113  !
6114  ! Multithreaded acceleration is NOT supported for this function.
6115  !
6116  ! We recommend you to read 'Working with commercial version' section of
6117  ! ALGLIB Reference Manual in order to find out how to use performance-
6118  ! related features provided by commercial edition of ALGLIB.
6119 
6120 Input parameters:
6121  A - symmetric matrix which is given by its upper or lower
6122  triangular part.
6123  Array whose indexes range within [0..N-1, 0..N-1].
6124  N - size of matrix A.
6125  ZNeeded - flag controlling whether the eigenvectors are needed or not.
6126  If ZNeeded is equal to:
6127  * 0, the eigenvectors are not returned;
6128  * 1, the eigenvectors are returned.
6129  IsUpper - storage format.
6130 
6131 Output parameters:
6132  D - eigenvalues in ascending order.
6133  Array whose index ranges within [0..N-1].
6134  Z - if ZNeeded is equal to:
6135  * 0, Z hasn't changed;
6136  * 1, Z contains the eigenvectors.
6137  Array whose indexes range within [0..N-1, 0..N-1].
6138  The eigenvectors are stored in the matrix columns.
6139 
6140 Result:
6141  True, if the algorithm has converged.
6142  False, if the algorithm hasn't converged (rare case).
6143 
6144  -- ALGLIB --
6145  Copyright 2005-2008 by Bochkanov Sergey
6146 *************************************************************************/
6147 bool smatrixevd(const real_2d_array &a, const ae_int_t n, const ae_int_t zneeded, const bool isupper, real_1d_array &d, real_2d_array &z);
6148 
6149 
6150 /*************************************************************************
6151 Subroutine for finding the eigenvalues (and eigenvectors) of a symmetric
6152 matrix in a given half open interval (A, B] by using a bisection and
6153 inverse iteration
6154 
6155 Input parameters:
6156  A - symmetric matrix which is given by its upper or lower
6157  triangular part. Array [0..N-1, 0..N-1].
6158  N - size of matrix A.
6159  ZNeeded - flag controlling whether the eigenvectors are needed or not.
6160  If ZNeeded is equal to:
6161  * 0, the eigenvectors are not returned;
6162  * 1, the eigenvectors are returned.
6163  IsUpperA - storage format of matrix A.
6164  B1, B2 - half open interval (B1, B2] to search eigenvalues in.
6165 
6166 Output parameters:
6167  M - number of eigenvalues found in a given half-interval (M>=0).
6168  W - array of the eigenvalues found.
6169  Array whose index ranges within [0..M-1].
6170  Z - if ZNeeded is equal to:
6171  * 0, Z hasn't changed;
6172  * 1, Z contains eigenvectors.
6173  Array whose indexes range within [0..N-1, 0..M-1].
6174  The eigenvectors are stored in the matrix columns.
6175 
6176 Result:
6177  True, if successful. M contains the number of eigenvalues in the given
6178  half-interval (could be equal to 0), W contains the eigenvalues,
6179  Z contains the eigenvectors (if needed).
6180 
6181  False, if the bisection method subroutine wasn't able to find the
6182  eigenvalues in the given interval or if the inverse iteration subroutine
6183  wasn't able to find all the corresponding eigenvectors.
6184  In that case, the eigenvalues and eigenvectors are not returned,
6185  M is equal to 0.
6186 
6187  -- ALGLIB --
6188  Copyright 07.01.2006 by Bochkanov Sergey
6189 *************************************************************************/
6190 bool smatrixevdr(const real_2d_array &a, const ae_int_t n, const ae_int_t zneeded, const bool isupper, const double b1, const double b2, ae_int_t &m, real_1d_array &w, real_2d_array &z);
6191 
6192 
6193 /*************************************************************************
6194 Subroutine for finding the eigenvalues and eigenvectors of a symmetric
6195 matrix with given indexes by using bisection and inverse iteration methods.
6196 
6197 Input parameters:
6198  A - symmetric matrix which is given by its upper or lower
6199  triangular part. Array whose indexes range within [0..N-1, 0..N-1].
6200  N - size of matrix A.
6201  ZNeeded - flag controlling whether the eigenvectors are needed or not.
6202  If ZNeeded is equal to:
6203  * 0, the eigenvectors are not returned;
6204  * 1, the eigenvectors are returned.
6205  IsUpperA - storage format of matrix A.
6206  I1, I2 - index interval for searching (from I1 to I2).
6207  0 <= I1 <= I2 <= N-1.
6208 
6209 Output parameters:
6210  W - array of the eigenvalues found.
6211  Array whose index ranges within [0..I2-I1].
6212  Z - if ZNeeded is equal to:
6213  * 0, Z hasn't changed;
6214  * 1, Z contains eigenvectors.
6215  Array whose indexes range within [0..N-1, 0..I2-I1].
6216  In that case, the eigenvectors are stored in the matrix columns.
6217 
6218 Result:
6219  True, if successful. W contains the eigenvalues, Z contains the
6220  eigenvectors (if needed).
6221 
6222  False, if the bisection method subroutine wasn't able to find the
6223  eigenvalues in the given interval or if the inverse iteration subroutine
6224  wasn't able to find all the corresponding eigenvectors.
6225  In that case, the eigenvalues and eigenvectors are not returned.
6226 
6227  -- ALGLIB --
6228  Copyright 07.01.2006 by Bochkanov Sergey
6229 *************************************************************************/
6230 bool smatrixevdi(const real_2d_array &a, const ae_int_t n, const ae_int_t zneeded, const bool isupper, const ae_int_t i1, const ae_int_t i2, real_1d_array &w, real_2d_array &z);
6231 
6232 
6233 /*************************************************************************
6234 Finding the eigenvalues and eigenvectors of a Hermitian matrix
6235 
6236 The algorithm finds eigen pairs of a Hermitian matrix by reducing it to
6237 real tridiagonal form and using the QL/QR algorithm.
6238 
6239 COMMERCIAL EDITION OF ALGLIB:
6240 
6241  ! Commercial version of ALGLIB includes one important improvement of
6242  ! this function, which can be used from C++ and C#:
6243  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
6244  !
6245  ! Intel MKL gives approximately constant (with respect to number of
6246  ! worker threads) acceleration factor which depends on CPU being used,
6247  ! problem size and "baseline" ALGLIB edition which is used for
6248  ! comparison.
6249  !
6250  ! Generally, commercial ALGLIB is several times faster than open-source
6251  ! generic C edition, and many times faster than open-source C# edition.
6252  !
6253  ! Multithreaded acceleration is NOT supported for this function.
6254  !
6255  ! We recommend you to read 'Working with commercial version' section of
6256  ! ALGLIB Reference Manual in order to find out how to use performance-
6257  ! related features provided by commercial edition of ALGLIB.
6258 
6259 Input parameters:
6260  A - Hermitian matrix which is given by its upper or lower
6261  triangular part.
6262  Array whose indexes range within [0..N-1, 0..N-1].
6263  N - size of matrix A.
6264  IsUpper - storage format.
6265  ZNeeded - flag controlling whether the eigenvectors are needed or
6266  not. If ZNeeded is equal to:
6267  * 0, the eigenvectors are not returned;
6268  * 1, the eigenvectors are returned.
6269 
6270 Output parameters:
6271  D - eigenvalues in ascending order.
6272  Array whose index ranges within [0..N-1].
6273  Z - if ZNeeded is equal to:
6274  * 0, Z hasn't changed;
6275  * 1, Z contains the eigenvectors.
6276  Array whose indexes range within [0..N-1, 0..N-1].
6277  The eigenvectors are stored in the matrix columns.
6278 
6279 Result:
6280  True, if the algorithm has converged.
6281  False, if the algorithm hasn't converged (rare case).
6282 
6283 Note:
6284  eigenvectors of Hermitian matrix are defined up to multiplication by
6285  a complex number L, such that |L|=1.
6286 
6287  -- ALGLIB --
6288  Copyright 2005, 23 March 2007 by Bochkanov Sergey
6289 *************************************************************************/
6290 bool hmatrixevd(const complex_2d_array &a, const ae_int_t n, const ae_int_t zneeded, const bool isupper, real_1d_array &d, complex_2d_array &z);
6291 
6292 
6293 /*************************************************************************
6294 Subroutine for finding the eigenvalues (and eigenvectors) of a Hermitian
6295 matrix in a given half-interval (A, B] by using a bisection and inverse
6296 iteration
6297 
6298 Input parameters:
6299  A - Hermitian matrix which is given by its upper or lower
6300  triangular part. Array whose indexes range within
6301  [0..N-1, 0..N-1].
6302  N - size of matrix A.
6303  ZNeeded - flag controlling whether the eigenvectors are needed or
6304  not. If ZNeeded is equal to:
6305  * 0, the eigenvectors are not returned;
6306  * 1, the eigenvectors are returned.
6307  IsUpperA - storage format of matrix A.
6308  B1, B2 - half-interval (B1, B2] to search eigenvalues in.
6309 
6310 Output parameters:
6311  M - number of eigenvalues found in a given half-interval, M>=0
6312  W - array of the eigenvalues found.
6313  Array whose index ranges within [0..M-1].
6314  Z - if ZNeeded is equal to:
6315  * 0, Z hasn't changed;
6316  * 1, Z contains eigenvectors.
6317  Array whose indexes range within [0..N-1, 0..M-1].
6318  The eigenvectors are stored in the matrix columns.
6319 
6320 Result:
6321  True, if successful. M contains the number of eigenvalues in the given
6322  half-interval (could be equal to 0), W contains the eigenvalues,
6323  Z contains the eigenvectors (if needed).
6324 
6325  False, if the bisection method subroutine wasn't able to find the
6326  eigenvalues in the given interval or if the inverse iteration
6327  subroutine wasn't able to find all the corresponding eigenvectors.
6328  In that case, the eigenvalues and eigenvectors are not returned, M is
6329  equal to 0.
6330 
6331 Note:
6332  eigenvectors of Hermitian matrix are defined up to multiplication by
6333  a complex number L, such that |L|=1.
6334 
6335  -- ALGLIB --
6336  Copyright 07.01.2006, 24.03.2007 by Bochkanov Sergey.
6337 *************************************************************************/
6338 bool hmatrixevdr(const complex_2d_array &a, const ae_int_t n, const ae_int_t zneeded, const bool isupper, const double b1, const double b2, ae_int_t &m, real_1d_array &w, complex_2d_array &z);
6339 
6340 
6341 /*************************************************************************
6342 Subroutine for finding the eigenvalues and eigenvectors of a Hermitian
6343 matrix with given indexes by using bisection and inverse iteration methods
6344 
6345 Input parameters:
6346  A - Hermitian matrix which is given by its upper or lower
6347  triangular part.
6348  Array whose indexes range within [0..N-1, 0..N-1].
6349  N - size of matrix A.
6350  ZNeeded - flag controlling whether the eigenvectors are needed or
6351  not. If ZNeeded is equal to:
6352  * 0, the eigenvectors are not returned;
6353  * 1, the eigenvectors are returned.
6354  IsUpperA - storage format of matrix A.
6355  I1, I2 - index interval for searching (from I1 to I2).
6356  0 <= I1 <= I2 <= N-1.
6357 
6358 Output parameters:
6359  W - array of the eigenvalues found.
6360  Array whose index ranges within [0..I2-I1].
6361  Z - if ZNeeded is equal to:
6362  * 0, Z hasn't changed;
6363  * 1, Z contains eigenvectors.
6364  Array whose indexes range within [0..N-1, 0..I2-I1].
6365  In that case, the eigenvectors are stored in the matrix
6366  columns.
6367 
6368 Result:
6369  True, if successful. W contains the eigenvalues, Z contains the
6370  eigenvectors (if needed).
6371 
6372  False, if the bisection method subroutine wasn't able to find the
6373  eigenvalues in the given interval or if the inverse iteration
6374  subroutine wasn't able to find all the corresponding eigenvectors.
6375  In that case, the eigenvalues and eigenvectors are not returned.
6376 
6377 Note:
6378  eigenvectors of Hermitian matrix are defined up to multiplication by
6379  a complex number L, such that |L|=1.
6380 
6381  -- ALGLIB --
6382  Copyright 07.01.2006, 24.03.2007 by Bochkanov Sergey.
6383 *************************************************************************/
6384 bool hmatrixevdi(const complex_2d_array &a, const ae_int_t n, const ae_int_t zneeded, const bool isupper, const ae_int_t i1, const ae_int_t i2, real_1d_array &w, complex_2d_array &z);
6385 
6386 
6387 /*************************************************************************
6388 Finding the eigenvalues and eigenvectors of a tridiagonal symmetric matrix
6389 
6390 The algorithm finds the eigen pairs of a tridiagonal symmetric matrix by
6391 using a QL/QR algorithm with implicit shifts.
6392 
6393 COMMERCIAL EDITION OF ALGLIB:
6394 
6395  ! Commercial version of ALGLIB includes one important improvement of
6396  ! this function, which can be used from C++ and C#:
6397  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
6398  !
6399  ! Intel MKL gives approximately constant (with respect to number of
6400  ! worker threads) acceleration factor which depends on CPU being used,
6401  ! problem size and "baseline" ALGLIB edition which is used for
6402  ! comparison.
6403  !
6404  ! Generally, commercial ALGLIB is several times faster than open-source
6405  ! generic C edition, and many times faster than open-source C# edition.
6406  !
6407  ! Multithreaded acceleration is NOT supported for this function.
6408  !
6409  ! We recommend you to read 'Working with commercial version' section of
6410  ! ALGLIB Reference Manual in order to find out how to use performance-
6411  ! related features provided by commercial edition of ALGLIB.
6412 
6413 Input parameters:
6414  D - the main diagonal of a tridiagonal matrix.
6415  Array whose index ranges within [0..N-1].
6416  E - the secondary diagonal of a tridiagonal matrix.
6417  Array whose index ranges within [0..N-2].
6418  N - size of matrix A.
6419  ZNeeded - flag controlling whether the eigenvectors are needed or not.
6420  If ZNeeded is equal to:
6421  * 0, the eigenvectors are not needed;
6422  * 1, the eigenvectors of a tridiagonal matrix
6423  are multiplied by the square matrix Z. It is used if the
6424  tridiagonal matrix is obtained by the similarity
6425  transformation of a symmetric matrix;
6426  * 2, the eigenvectors of a tridiagonal matrix replace the
6427  square matrix Z;
6428  * 3, matrix Z contains the first row of the eigenvectors
6429  matrix.
6430  Z - if ZNeeded=1, Z contains the square matrix by which the
6431  eigenvectors are multiplied.
6432  Array whose indexes range within [0..N-1, 0..N-1].
6433 
6434 Output parameters:
6435  D - eigenvalues in ascending order.
6436  Array whose index ranges within [0..N-1].
6437  Z - if ZNeeded is equal to:
6438  * 0, Z hasn't changed;
6439  * 1, Z contains the product of a given matrix (from the left)
6440  and the eigenvectors matrix (from the right);
6441  * 2, Z contains the eigenvectors.
6442  * 3, Z contains the first row of the eigenvectors matrix.
6443  If ZNeeded<3, Z is the array whose indexes range within [0..N-1, 0..N-1].
6444  In that case, the eigenvectors are stored in the matrix columns.
6445  If ZNeeded=3, Z is the array whose indexes range within [0..0, 0..N-1].
6446 
6447 Result:
6448  True, if the algorithm has converged.
6449  False, if the algorithm hasn't converged.
6450 
6451  -- LAPACK routine (version 3.0) --
6452  Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
6453  Courant Institute, Argonne National Lab, and Rice University
6454  September 30, 1994
6455 *************************************************************************/
6456 bool smatrixtdevd(real_1d_array &d, const real_1d_array &e, const ae_int_t n, const ae_int_t zneeded, real_2d_array &z);
6457 
6458 
6459 /*************************************************************************
6460 Subroutine for finding the tridiagonal matrix eigenvalues/vectors in a
6461 given half-interval (A, B] by using bisection and inverse iteration.
6462 
6463 Input parameters:
6464  D - the main diagonal of a tridiagonal matrix.
6465  Array whose index ranges within [0..N-1].
6466  E - the secondary diagonal of a tridiagonal matrix.
6467  Array whose index ranges within [0..N-2].
6468  N - size of matrix, N>=0.
6469  ZNeeded - flag controlling whether the eigenvectors are needed or not.
6470  If ZNeeded is equal to:
6471  * 0, the eigenvectors are not needed;
6472  * 1, the eigenvectors of a tridiagonal matrix are multiplied
6473  by the square matrix Z. It is used if the tridiagonal
6474  matrix is obtained by the similarity transformation
6475  of a symmetric matrix.
6476  * 2, the eigenvectors of a tridiagonal matrix replace matrix Z.
6477  A, B - half-interval (A, B] to search eigenvalues in.
6478  Z - if ZNeeded is equal to:
6479  * 0, Z isn't used and remains unchanged;
6480  * 1, Z contains the square matrix (array whose indexes range
6481  within [0..N-1, 0..N-1]) which reduces the given symmetric
6482  matrix to tridiagonal form;
6483  * 2, Z isn't used (but changed on the exit).
6484 
6485 Output parameters:
6486  D - array of the eigenvalues found.
6487  Array whose index ranges within [0..M-1].
6488  M - number of eigenvalues found in the given half-interval (M>=0).
6489  Z - if ZNeeded is equal to:
6490  * 0, doesn't contain any information;
6491  * 1, contains the product of a given NxN matrix Z (from the
6492  left) and NxM matrix of the eigenvectors found (from the
6493  right). Array whose indexes range within [0..N-1, 0..M-1].
6494  * 2, contains the matrix of the eigenvectors found.
6495  Array whose indexes range within [0..N-1, 0..M-1].
6496 
6497 Result:
6498 
6499  True, if successful. In that case, M contains the number of eigenvalues
6500  in the given half-interval (could be equal to 0), D contains the eigenvalues,
6501  Z contains the eigenvectors (if needed).
6502  It should be noted that the subroutine changes the size of arrays D and Z.
6503 
6504  False, if the bisection method subroutine wasn't able to find the
6505  eigenvalues in the given interval or if the inverse iteration subroutine
6506  wasn't able to find all the corresponding eigenvectors. In that case,
6507  the eigenvalues and eigenvectors are not returned, M is equal to 0.
6508 
6509  -- ALGLIB --
6510  Copyright 31.03.2008 by Bochkanov Sergey
6511 *************************************************************************/
6512 bool smatrixtdevdr(real_1d_array &d, const real_1d_array &e, const ae_int_t n, const ae_int_t zneeded, const double a, const double b, ae_int_t &m, real_2d_array &z);
6513 
6514 
6515 /*************************************************************************
6516 Subroutine for finding tridiagonal matrix eigenvalues/vectors with given
6517 indexes (in ascending order) by using the bisection and inverse iteration.
6518 
6519 Input parameters:
6520  D - the main diagonal of a tridiagonal matrix.
6521  Array whose index ranges within [0..N-1].
6522  E - the secondary diagonal of a tridiagonal matrix.
6523  Array whose index ranges within [0..N-2].
6524  N - size of matrix. N>=0.
6525  ZNeeded - flag controlling whether the eigenvectors are needed or not.
6526  If ZNeeded is equal to:
6527  * 0, the eigenvectors are not needed;
6528  * 1, the eigenvectors of a tridiagonal matrix are multiplied
6529  by the square matrix Z. It is used if the
6530  tridiagonal matrix is obtained by the similarity transformation
6531  of a symmetric matrix.
6532  * 2, the eigenvectors of a tridiagonal matrix replace
6533  matrix Z.
6534  I1, I2 - index interval for searching (from I1 to I2).
6535  0 <= I1 <= I2 <= N-1.
6536  Z - if ZNeeded is equal to:
6537  * 0, Z isn't used and remains unchanged;
6538  * 1, Z contains the square matrix (array whose indexes range within [0..N-1, 0..N-1])
6539  which reduces the given symmetric matrix to tridiagonal form;
6540  * 2, Z isn't used (but changed on the exit).
6541 
6542 Output parameters:
6543  D - array of the eigenvalues found.
6544  Array whose index ranges within [0..I2-I1].
6545  Z - if ZNeeded is equal to:
6546  * 0, doesn't contain any information;
6547  * 1, contains the product of a given NxN matrix Z (from the left) and
6548  Nx(I2-I1) matrix of the eigenvectors found (from the right).
6549  Array whose indexes range within [0..N-1, 0..I2-I1].
6550  * 2, contains the matrix of the eigenvectors found.
6551  Array whose indexes range within [0..N-1, 0..I2-I1].
6552 
6553 
6554 Result:
6555 
6556  True, if successful. In that case, D contains the eigenvalues,
6557  Z contains the eigenvectors (if needed).
6558  It should be noted that the subroutine changes the size of arrays D and Z.
6559 
6560  False, if the bisection method subroutine wasn't able to find the eigenvalues
6561  in the given interval or if the inverse iteration subroutine wasn't able
6562  to find all the corresponding eigenvectors. In that case, the eigenvalues
6563  and eigenvectors are not returned.
6564 
6565  -- ALGLIB --
6566  Copyright 25.12.2005 by Bochkanov Sergey
6567 *************************************************************************/
6568 bool smatrixtdevdi(real_1d_array &d, const real_1d_array &e, const ae_int_t n, const ae_int_t zneeded, const ae_int_t i1, const ae_int_t i2, real_2d_array &z);
6569 
6570 
6571 /*************************************************************************
6572 Finding eigenvalues and eigenvectors of a general (unsymmetric) matrix
6573 
6574 COMMERCIAL EDITION OF ALGLIB:
6575 
6576  ! Commercial version of ALGLIB includes one important improvement of
6577  ! this function, which can be used from C++ and C#:
6578  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
6579  !
6580  ! Intel MKL gives approximately constant (with respect to number of
6581  ! worker threads) acceleration factor which depends on CPU being used,
6582  ! problem size and "baseline" ALGLIB edition which is used for
6583  ! comparison. Speed-up provided by MKL for this particular problem (EVD)
6584  ! is really high, because MKL uses combination of (a) better low-level
6585  ! optimizations, and (b) better EVD algorithms.
6586  !
6587  ! On one particular SSE-capable machine for N=1024, commercial MKL-
6588  ! -capable ALGLIB was:
6589  ! * 7-10 times faster than open source "generic C" version
6590  ! * 15-18 times faster than "pure C#" version
6591  !
6592  ! Multithreaded acceleration is NOT supported for this function.
6593  !
6594  ! We recommend you to read 'Working with commercial version' section of
6595  ! ALGLIB Reference Manual in order to find out how to use performance-
6596  ! related features provided by commercial edition of ALGLIB.
6597 
6598 The algorithm finds eigenvalues and eigenvectors of a general matrix by
6599 using the QR algorithm with multiple shifts. The algorithm can find
6600 eigenvalues and both left and right eigenvectors.
6601 
6602 The right eigenvector is a vector x such that A*x = w*x, and the left
6603 eigenvector is a vector y such that y'*A = w*y' (here y' implies a complex
6604 conjugate transposition of vector y).
6605 
6606 Input parameters:
6607  A - matrix. Array whose indexes range within [0..N-1, 0..N-1].
6608  N - size of matrix A.
6609  VNeeded - flag controlling whether eigenvectors are needed or not.
6610  If VNeeded is equal to:
6611  * 0, eigenvectors are not returned;
6612  * 1, right eigenvectors are returned;
6613  * 2, left eigenvectors are returned;
6614  * 3, both left and right eigenvectors are returned.
6615 
6616 Output parameters:
6617  WR - real parts of eigenvalues.
6618  Array whose index ranges within [0..N-1].
6619  WI - imaginary parts of eigenvalues.
6620  Array whose index ranges within [0..N-1].
6621  VL, VR - arrays of left and right eigenvectors (if they are needed).
6622  If WI[i]=0, the respective eigenvalue is a real number,
6623  and it corresponds to the column number I of matrices VL/VR.
6624  If WI[i]>0, we have a pair of complex conjugate numbers with
6625  positive and negative imaginary parts:
6626  the first eigenvalue WR[i] + sqrt(-1)*WI[i];
6627  the second eigenvalue WR[i+1] + sqrt(-1)*WI[i+1];
6628  WI[i]>0
6629  WI[i+1] = -WI[i] < 0
6630  In that case, the eigenvector corresponding to the first
6631  eigenvalue is located in i and i+1 columns of matrices
6632  VL/VR (the column number i contains the real part, and the
6633  column number i+1 contains the imaginary part), and the vector
6634  corresponding to the second eigenvalue is a complex conjugate to
6635  the first vector.
6636  Arrays whose indexes range within [0..N-1, 0..N-1].
6637 
6638 Result:
6639  True, if the algorithm has converged.
6640  False, if the algorithm has not converged.
6641 
6642 Note 1:
6643  Some users may ask the following question: what if WI[N-1]>0?
6644  WI[N] must contain an eigenvalue which is complex conjugate to the
6645  N-th eigenvalue, but the array has only size N?
6646  The answer is as follows: such a situation cannot occur because the
6647  algorithm finds pairs of eigenvalues, therefore, if WI[i]>0, I is
6648  strictly less than N-1.
6649 
6650 Note 2:
6651  The algorithm performance depends on the value of the internal parameter
6652  NS of the InternalSchurDecomposition subroutine which defines the number
6653  of shifts in the QR algorithm (similarly to the block width in block-matrix
6654  algorithms of linear algebra). If you require maximum performance
6655  on your machine, it is recommended to adjust this parameter manually.
6656 
6657 
6658 See also the InternalTREVC subroutine.
6659 
6660 The algorithm is based on the LAPACK 3.0 library.
6661 *************************************************************************/
6662 bool rmatrixevd(const real_2d_array &a, const ae_int_t n, const ae_int_t vneeded, real_1d_array &wr, real_1d_array &wi, real_2d_array &vl, real_2d_array &vr);
6663 
6664 /*************************************************************************
6665 Subroutine performing the Schur decomposition of a general matrix by using
6666 the QR algorithm with multiple shifts.
6667 
6668 COMMERCIAL EDITION OF ALGLIB:
6669 
6670  ! Commercial version of ALGLIB includes one important improvement of
6671  ! this function, which can be used from C++ and C#:
6672  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
6673  !
6674  ! Intel MKL gives approximately constant (with respect to number of
6675  ! worker threads) acceleration factor which depends on CPU being used,
6676  ! problem size and "baseline" ALGLIB edition which is used for
6677  ! comparison.
6678  !
6679  ! Multithreaded acceleration is NOT supported for this function.
6680  !
6681  ! We recommend you to read 'Working with commercial version' section of
6682  ! ALGLIB Reference Manual in order to find out how to use performance-
6683  ! related features provided by commercial edition of ALGLIB.
6684 
6685 The source matrix A is represented as S'*A*S = T, where S is an orthogonal
6686 matrix (Schur vectors), T - upper quasi-triangular matrix (with blocks of
6687 sizes 1x1 and 2x2 on the main diagonal).
6688 
6689 Input parameters:
6690  A - matrix to be decomposed.
6691  Array whose indexes range within [0..N-1, 0..N-1].
6692  N - size of A, N>=0.
6693 
6694 
6695 Output parameters:
6696  A - contains matrix T.
6697  Array whose indexes range within [0..N-1, 0..N-1].
6698  S - contains Schur vectors.
6699  Array whose indexes range within [0..N-1, 0..N-1].
6700 
6701 Note 1:
6702  The block structure of matrix T can be easily recognized: since all
6703  the elements below the blocks are zeros, the elements a[i+1,i] which
6704  are equal to 0 show the block border.
6705 
6706 Note 2:
6707  The algorithm performance depends on the value of the internal parameter
6708  NS of the InternalSchurDecomposition subroutine which defines the number
6709  of shifts in the QR algorithm (similarly to the block width in block-matrix
6710  algorithms in linear algebra). If you require maximum performance on
6711  your machine, it is recommended to adjust this parameter manually.
6712 
6713 Result:
6714  True,
6715  if the algorithm has converged and parameters A and S contain the result.
6716  False,
6717  if the algorithm has not converged.
6718 
6719 Algorithm implemented on the basis of the DHSEQR subroutine (LAPACK 3.0 library).
6720 *************************************************************************/
6722 
6723 /*************************************************************************
6724 Algorithm for solving the following generalized symmetric positive-definite
6725 eigenproblem:
6726  A*x = lambda*B*x (1) or
6727  A*B*x = lambda*x (2) or
6728  B*A*x = lambda*x (3).
6729 where A is a symmetric matrix, B - symmetric positive-definite matrix.
6730 The problem is solved by reducing it to an ordinary symmetric eigenvalue
6731 problem.
6732 
6733 Input parameters:
6734  A - symmetric matrix which is given by its upper or lower
6735  triangular part.
6736  Array whose indexes range within [0..N-1, 0..N-1].
6737  N - size of matrices A and B.
6738  IsUpperA - storage format of matrix A.
6739  B - symmetric positive-definite matrix which is given by
6740  its upper or lower triangular part.
6741  Array whose indexes range within [0..N-1, 0..N-1].
6742  IsUpperB - storage format of matrix B.
6743  ZNeeded - if ZNeeded is equal to:
6744  * 0, the eigenvectors are not returned;
6745  * 1, the eigenvectors are returned.
6746  ProblemType - if ProblemType is equal to:
6747  * 1, the following problem is solved: A*x = lambda*B*x;
6748  * 2, the following problem is solved: A*B*x = lambda*x;
6749  * 3, the following problem is solved: B*A*x = lambda*x.
6750 
6751 Output parameters:
6752  D - eigenvalues in ascending order.
6753  Array whose index ranges within [0..N-1].
6754  Z - if ZNeeded is equal to:
6755  * 0, Z hasn't changed;
6756  * 1, Z contains eigenvectors.
6757  Array whose indexes range within [0..N-1, 0..N-1].
6758  The eigenvectors are stored in matrix columns. It should
6759  be noted that the eigenvectors in such problems do not
6760  form an orthogonal system.
6761 
6762 Result:
6763  True, if the problem was solved successfully.
6764  False, if the error occurred during the Cholesky decomposition of matrix
6765  B (the matrix isn't positive-definite) or during the work of the iterative
6766  algorithm for solving the symmetric eigenproblem.
6767 
6768 See also the GeneralizedSymmetricDefiniteEVDReduce subroutine.
6769 
6770  -- ALGLIB --
6771  Copyright 1.28.2006 by Bochkanov Sergey
6772 *************************************************************************/
6773 bool smatrixgevd(const real_2d_array &a, const ae_int_t n, const bool isuppera, const real_2d_array &b, const bool isupperb, const ae_int_t zneeded, const ae_int_t problemtype, real_1d_array &d, real_2d_array &z);
6774 
6775 
6776 /*************************************************************************
6777 Algorithm for reduction of the following generalized symmetric positive-
6778 definite eigenvalue problem:
6779  A*x = lambda*B*x (1) or
6780  A*B*x = lambda*x (2) or
6781  B*A*x = lambda*x (3)
6782 to the symmetric eigenvalues problem C*y = lambda*y (eigenvalues of this and
6783 the given problems are the same, and the eigenvectors of the given problem
6784 could be obtained by multiplying the obtained eigenvectors by the
6785 transformation matrix x = R*y).
6786 
6787 Here A is a symmetric matrix, B - symmetric positive-definite matrix.
6788 
6789 Input parameters:
6790  A - symmetric matrix which is given by its upper or lower
6791  triangular part.
6792  Array whose indexes range within [0..N-1, 0..N-1].
6793  N - size of matrices A and B.
6794  IsUpperA - storage format of matrix A.
6795  B - symmetric positive-definite matrix which is given by
6796  its upper or lower triangular part.
6797  Array whose indexes range within [0..N-1, 0..N-1].
6798  IsUpperB - storage format of matrix B.
6799  ProblemType - if ProblemType is equal to:
6800  * 1, the following problem is solved: A*x = lambda*B*x;
6801  * 2, the following problem is solved: A*B*x = lambda*x;
6802  * 3, the following problem is solved: B*A*x = lambda*x.
6803 
6804 Output parameters:
6805  A - symmetric matrix which is given by its upper or lower
6806  triangle depending on IsUpperA. Contains matrix C.
6807  Array whose indexes range within [0..N-1, 0..N-1].
6808  R - upper triangular or lower triangular transformation matrix
6809  which is used to obtain the eigenvectors of a given problem
6810  as the product of eigenvectors of C (from the right) and
6811  matrix R (from the left). If the matrix is upper
6812  triangular, the elements below the main diagonal
6813  are equal to 0 (and vice versa). Thus, we can perform
6814  the multiplication without taking into account the
6815  internal structure (which is an easier though less
6816  effective way).
6817  Array whose indexes range within [0..N-1, 0..N-1].
6818  IsUpperR - type of matrix R (upper or lower triangular).
6819 
6820 Result:
6821  True, if the problem was reduced successfully.
6822  False, if the error occurred during the Cholesky decomposition of
6823  matrix B (the matrix is not positive-definite).
6824 
6825  -- ALGLIB --
6826  Copyright 1.28.2006 by Bochkanov Sergey
6827 *************************************************************************/
6828 bool smatrixgevdreduce(real_2d_array &a, const ae_int_t n, const bool isuppera, const real_2d_array &b, const bool isupperb, const ae_int_t problemtype, real_2d_array &r, bool &isupperr);
6829 
6830 /*************************************************************************
6831 Inverse matrix update by the Sherman-Morrison formula
6832 
6833 The algorithm updates matrix A^-1 when adding a number to an element
6834 of matrix A.
6835 
6836 Input parameters:
6837  InvA - inverse of matrix A.
6838  Array whose indexes range within [0..N-1, 0..N-1].
6839  N - size of matrix A.
6840  UpdRow - row where the element to be updated is stored.
6841  UpdColumn - column where the element to be updated is stored.
6842  UpdVal - a number to be added to the element.
6843 
6844 
6845 Output parameters:
6846  InvA - inverse of modified matrix A.
6847 
6848  -- ALGLIB --
6849  Copyright 2005 by Bochkanov Sergey
6850 *************************************************************************/
6851 void rmatrixinvupdatesimple(real_2d_array &inva, const ae_int_t n, const ae_int_t updrow, const ae_int_t updcolumn, const double updval);
6852 
6853 
6854 /*************************************************************************
6855 Inverse matrix update by the Sherman-Morrison formula
6856 
6857 The algorithm updates matrix A^-1 when adding a vector to a row
6858 of matrix A.
6859 
6860 Input parameters:
6861  InvA - inverse of matrix A.
6862  Array whose indexes range within [0..N-1, 0..N-1].
6863  N - size of matrix A.
6864  UpdRow - the row of A whose vector V was added.
6865  0 <= UpdRow <= N-1
6866  V - the vector to be added to a row.
6867  Array whose index ranges within [0..N-1].
6868 
6869 Output parameters:
6870  InvA - inverse of modified matrix A.
6871 
6872  -- ALGLIB --
6873  Copyright 2005 by Bochkanov Sergey
6874 *************************************************************************/
6875 void rmatrixinvupdaterow(real_2d_array &inva, const ae_int_t n, const ae_int_t updrow, const real_1d_array &v);
6876 
6877 
6878 /*************************************************************************
6879 Inverse matrix update by the Sherman-Morrison formula
6880 
6881 The algorithm updates matrix A^-1 when adding a vector to a column
6882 of matrix A.
6883 
6884 Input parameters:
6885  InvA - inverse of matrix A.
6886  Array whose indexes range within [0..N-1, 0..N-1].
6887  N - size of matrix A.
6888  UpdColumn - the column of A whose vector U was added.
6889  0 <= UpdColumn <= N-1
6890  U - the vector to be added to a column.
6891  Array whose index ranges within [0..N-1].
6892 
6893 Output parameters:
6894  InvA - inverse of modified matrix A.
6895 
6896  -- ALGLIB --
6897  Copyright 2005 by Bochkanov Sergey
6898 *************************************************************************/
6899 void rmatrixinvupdatecolumn(real_2d_array &inva, const ae_int_t n, const ae_int_t updcolumn, const real_1d_array &u);
6900 
6901 
6902 /*************************************************************************
6903 Inverse matrix update by the Sherman-Morrison formula
6904 
6905 The algorithm computes the inverse of matrix A+u*v' by using the given matrix
6906 A^-1 and the vectors u and v.
6907 
6908 Input parameters:
6909  InvA - inverse of matrix A.
6910  Array whose indexes range within [0..N-1, 0..N-1].
6911  N - size of matrix A.
6912  U - the vector modifying the matrix.
6913  Array whose index ranges within [0..N-1].
6914  V - the vector modifying the matrix.
6915  Array whose index ranges within [0..N-1].
6916 
6917 Output parameters:
6918  InvA - inverse of matrix A + u*v'.
6919 
6920  -- ALGLIB --
6921  Copyright 2005 by Bochkanov Sergey
6922 *************************************************************************/
6923 void rmatrixinvupdateuv(real_2d_array &inva, const ae_int_t n, const real_1d_array &u, const real_1d_array &v);
6924 
6925 /*************************************************************************
6926 Determinant calculation of the matrix given by its LU decomposition.
6927 
6928 Input parameters:
6929  A - LU decomposition of the matrix (output of
6930  RMatrixLU subroutine).
6931  Pivots - table of permutations which were made during
6932  the LU decomposition.
6933  Output of RMatrixLU subroutine.
6934  N - (optional) size of matrix A:
6935  * if given, only principal NxN submatrix is processed and
6936  overwritten. other elements are unchanged.
6937  * if not given, automatically determined from matrix size
6938  (A must be square matrix)
6939 
6940 Result: matrix determinant.
6941 
6942  -- ALGLIB --
6943  Copyright 2005 by Bochkanov Sergey
6944 *************************************************************************/
6945 double rmatrixludet(const real_2d_array &a, const integer_1d_array &pivots, const ae_int_t n);
6946 double rmatrixludet(const real_2d_array &a, const integer_1d_array &pivots);
6947 
6948 
6949 /*************************************************************************
6950 Calculation of the determinant of a general matrix
6951 
6952 Input parameters:
6953  A - matrix, array[0..N-1, 0..N-1]
6954  N - (optional) size of matrix A:
6955  * if given, only principal NxN submatrix is processed and
6956  overwritten. other elements are unchanged.
6957  * if not given, automatically determined from matrix size
6958  (A must be square matrix)
6959 
6960 Result: determinant of matrix A.
6961 
6962  -- ALGLIB --
6963  Copyright 2005 by Bochkanov Sergey
6964 *************************************************************************/
6965 double rmatrixdet(const real_2d_array &a, const ae_int_t n);
6966 double rmatrixdet(const real_2d_array &a);
6967 
6968 
6969 /*************************************************************************
6970 Determinant calculation of the matrix given by its LU decomposition.
6971 
6972 Input parameters:
6973  A - LU decomposition of the matrix (output of
6974  RMatrixLU subroutine).
6975  Pivots - table of permutations which were made during
6976  the LU decomposition.
6977  Output of RMatrixLU subroutine.
6978  N - (optional) size of matrix A:
6979  * if given, only principal NxN submatrix is processed and
6980  overwritten. other elements are unchanged.
6981  * if not given, automatically determined from matrix size
6982  (A must be square matrix)
6983 
6984 Result: matrix determinant.
6985 
6986  -- ALGLIB --
6987  Copyright 2005 by Bochkanov Sergey
6988 *************************************************************************/
6991 
6992 
6993 /*************************************************************************
6994 Calculation of the determinant of a general matrix
6995 
6996 Input parameters:
6997  A - matrix, array[0..N-1, 0..N-1]
6998  N - (optional) size of matrix A:
6999  * if given, only principal NxN submatrix is processed and
7000  overwritten. other elements are unchanged.
7001  * if not given, automatically determined from matrix size
7002  (A must be square matrix)
7003 
7004 Result: determinant of matrix A.
7005 
7006  -- ALGLIB --
7007  Copyright 2005 by Bochkanov Sergey
7008 *************************************************************************/
7011 
7012 
7013 /*************************************************************************
7014 Determinant calculation of the matrix given by the Cholesky decomposition.
7015 
7016 Input parameters:
7017  A - Cholesky decomposition,
7018  output of SMatrixCholesky subroutine.
7019  N - (optional) size of matrix A:
7020  * if given, only principal NxN submatrix is processed and
7021  overwritten. other elements are unchanged.
7022  * if not given, automatically determined from matrix size
7023  (A must be square matrix)
7024 
7025 As the determinant is equal to the product of squares of diagonal elements,
7026 it's not necessary to specify which triangle - lower or upper - the matrix
7027 is stored in.
7028 
7029 Result:
7030  matrix determinant.
7031 
7032  -- ALGLIB --
7033  Copyright 2005-2008 by Bochkanov Sergey
7034 *************************************************************************/
7035 double spdmatrixcholeskydet(const real_2d_array &a, const ae_int_t n);
7037 
7038 
7039 /*************************************************************************
7040 Determinant calculation of the symmetric positive definite matrix.
7041 
7042 Input parameters:
7043  A - matrix. Array with elements [0..N-1, 0..N-1].
7044  N - (optional) size of matrix A:
7045  * if given, only principal NxN submatrix is processed and
7046  overwritten. other elements are unchanged.
7047  * if not given, automatically determined from matrix size
7048  (A must be square matrix)
7049  IsUpper - (optional) storage type:
7050  * if True, symmetric matrix A is given by its upper
7051  triangle, and the lower triangle isn't used/changed by
7052  function
7053  * if False, symmetric matrix A is given by its lower
7054  triangle, and the upper triangle isn't used/changed by
7055  function
7056  * if not given, both lower and upper triangles must be
7057  filled.
7058 
7059 Result:
7060  determinant of matrix A.
7061  If matrix A is not positive definite, exception is thrown.
7062 
7063  -- ALGLIB --
7064  Copyright 2005-2008 by Bochkanov Sergey
7065 *************************************************************************/
7066 double spdmatrixdet(const real_2d_array &a, const ae_int_t n, const bool isupper);
7067 double spdmatrixdet(const real_2d_array &a);
7068 }
7069 
7071 //
7072 // THIS SECTION CONTAINS COMPUTATIONAL CORE DECLARATIONS (FUNCTIONS)
7073 //
7075 namespace alglib_impl
7076 {
7078  ae_int_t n,
7079  ae_int_t k,
7080  sparsematrix* s,
7081  ae_state *_state);
7083  ae_int_t n,
7084  ae_int_t k,
7085  sparsematrix* s,
7086  ae_state *_state);
7088  ae_int_t n,
7089  /* Integer */ ae_vector* ner,
7090  sparsematrix* s,
7091  ae_state *_state);
7093  ae_int_t n,
7094  /* Integer */ ae_vector* ner,
7095  sparsematrix* s,
7096  ae_state *_state);
7098  ae_int_t n,
7099  /* Integer */ ae_vector* d,
7100  /* Integer */ ae_vector* u,
7101  sparsematrix* s,
7102  ae_state *_state);
7104  ae_int_t n,
7105  /* Integer */ ae_vector* d,
7106  /* Integer */ ae_vector* u,
7107  sparsematrix* s,
7108  ae_state *_state);
7113  ae_int_t i,
7114  ae_int_t j,
7115  double v,
7116  ae_state *_state);
7118  ae_int_t i,
7119  ae_int_t j,
7120  double v,
7121  ae_state *_state);
7123  ae_int_t i,
7124  ae_int_t j,
7125  ae_state *_state);
7128  /* Real */ ae_vector* x,
7129  /* Real */ ae_vector* y,
7130  ae_state *_state);
7132  /* Real */ ae_vector* x,
7133  /* Real */ ae_vector* y,
7134  ae_state *_state);
7136  /* Real */ ae_vector* x,
7137  /* Real */ ae_vector* y0,
7138  /* Real */ ae_vector* y1,
7139  ae_state *_state);
7141  ae_bool isupper,
7142  /* Real */ ae_vector* x,
7143  /* Real */ ae_vector* y,
7144  ae_state *_state);
7146  ae_bool isupper,
7147  /* Real */ ae_vector* x,
7148  ae_state *_state);
7150  /* Real */ ae_matrix* a,
7151  ae_int_t k,
7152  /* Real */ ae_matrix* b,
7153  ae_state *_state);
7155  /* Real */ ae_matrix* a,
7156  ae_int_t k,
7157  /* Real */ ae_matrix* b,
7158  ae_state *_state);
7160  /* Real */ ae_matrix* a,
7161  ae_int_t k,
7162  /* Real */ ae_matrix* b0,
7163  /* Real */ ae_matrix* b1,
7164  ae_state *_state);
7166  ae_bool isupper,
7167  /* Real */ ae_matrix* a,
7168  ae_int_t k,
7169  /* Real */ ae_matrix* b,
7170  ae_state *_state);
7172  ae_bool isupper,
7173  ae_bool isunit,
7174  ae_int_t optype,
7175  /* Real */ ae_vector* x,
7176  /* Real */ ae_vector* y,
7177  ae_state *_state);
7179  ae_bool isupper,
7180  ae_bool isunit,
7181  ae_int_t optype,
7182  /* Real */ ae_vector* x,
7183  ae_state *_state);
7187  ae_int_t* t0,
7188  ae_int_t* t1,
7189  ae_int_t* i,
7190  ae_int_t* j,
7191  double* v,
7192  ae_state *_state);
7194  ae_int_t i,
7195  ae_int_t j,
7196  double v,
7197  ae_state *_state);
7199  ae_int_t i,
7200  /* Real */ ae_vector* irow,
7201  ae_state *_state);
7203  ae_int_t i,
7204  /* Integer */ ae_vector* colidx,
7205  /* Real */ ae_vector* vals,
7206  ae_int_t* nzcnt,
7207  ae_state *_state);
7211  ae_int_t fmt,
7212  sparsematrix* s1,
7213  ae_state *_state);
7216  sparsematrix* s1,
7217  ae_state *_state);
7219  sparsematrix* s1,
7220  ae_state *_state);
7224  sparsematrix* s1,
7225  ae_state *_state);
7229  sparsematrix* s1,
7230  ae_state *_state);
7241 void _sparsematrix_init(void* _p, ae_state *_state);
7242 void _sparsematrix_init_copy(void* _dst, void* _src, ae_state *_state);
7243 void _sparsematrix_clear(void* _p);
7244 void _sparsematrix_destroy(void* _p);
7245 void _sparsebuffers_init(void* _p, ae_state *_state);
7246 void _sparsebuffers_init_copy(void* _dst, void* _src, ae_state *_state);
7247 void _sparsebuffers_clear(void* _p);
7248 void _sparsebuffers_destroy(void* _p);
7250  /* Real */ ae_matrix* a,
7251  ae_state *_state);
7253  double c,
7254  /* Real */ ae_matrix* a,
7255  ae_state *_state);
7257  /* Complex */ ae_matrix* a,
7258  ae_state *_state);
7260  double c,
7261  /* Complex */ ae_matrix* a,
7262  ae_state *_state);
7264  double c,
7265  /* Real */ ae_matrix* a,
7266  ae_state *_state);
7268  double c,
7269  /* Real */ ae_matrix* a,
7270  ae_state *_state);
7272  double c,
7273  /* Complex */ ae_matrix* a,
7274  ae_state *_state);
7276  double c,
7277  /* Complex */ ae_matrix* a,
7278  ae_state *_state);
7280  ae_int_t m,
7281  ae_int_t n,
7282  ae_state *_state);
7284  ae_int_t m,
7285  ae_int_t n,
7286  ae_state *_state);
7288  ae_int_t m,
7289  ae_int_t n,
7290  ae_state *_state);
7292  ae_int_t m,
7293  ae_int_t n,
7294  ae_state *_state);
7295 void smatrixrndmultiply(/* Real */ ae_matrix* a,
7296  ae_int_t n,
7297  ae_state *_state);
7298 void hmatrixrndmultiply(/* Complex */ ae_matrix* a,
7299  ae_int_t n,
7300  ae_state *_state);
7301 void ablassplitlength(/* Real */ ae_matrix* a,
7302  ae_int_t n,
7303  ae_int_t* n1,
7304  ae_int_t* n2,
7305  ae_state *_state);
7306 void ablascomplexsplitlength(/* Complex */ ae_matrix* a,
7307  ae_int_t n,
7308  ae_int_t* n1,
7309  ae_int_t* n2,
7310  ae_state *_state);
7311 ae_int_t ablasblocksize(/* Real */ ae_matrix* a, ae_state *_state);
7313  ae_state *_state);
7316  ae_int_t n,
7317  /* Complex */ ae_matrix* a,
7318  ae_int_t ia,
7319  ae_int_t ja,
7320  /* Complex */ ae_matrix* b,
7321  ae_int_t ib,
7322  ae_int_t jb,
7323  ae_state *_state);
7325  ae_int_t n,
7326  /* Real */ ae_matrix* a,
7327  ae_int_t ia,
7328  ae_int_t ja,
7329  /* Real */ ae_matrix* b,
7330  ae_int_t ib,
7331  ae_int_t jb,
7332  ae_state *_state);
7334  ae_int_t n,
7335  ae_bool isupper,
7336  ae_state *_state);
7338  ae_int_t n,
7339  /* Complex */ ae_matrix* a,
7340  ae_int_t ia,
7341  ae_int_t ja,
7342  /* Complex */ ae_matrix* b,
7343  ae_int_t ib,
7344  ae_int_t jb,
7345  ae_state *_state);
7347  ae_int_t n,
7348  /* Real */ ae_matrix* a,
7349  ae_int_t ia,
7350  ae_int_t ja,
7351  /* Real */ ae_matrix* b,
7352  ae_int_t ib,
7353  ae_int_t jb,
7354  ae_state *_state);
7356  ae_int_t n,
7357  /* Complex */ ae_matrix* a,
7358  ae_int_t ia,
7359  ae_int_t ja,
7360  /* Complex */ ae_vector* u,
7361  ae_int_t iu,
7362  /* Complex */ ae_vector* v,
7363  ae_int_t iv,
7364  ae_state *_state);
7366  ae_int_t n,
7367  /* Real */ ae_matrix* a,
7368  ae_int_t ia,
7369  ae_int_t ja,
7370  /* Real */ ae_vector* u,
7371  ae_int_t iu,
7372  /* Real */ ae_vector* v,
7373  ae_int_t iv,
7374  ae_state *_state);
7376  ae_int_t n,
7377  /* Complex */ ae_matrix* a,
7378  ae_int_t ia,
7379  ae_int_t ja,
7380  ae_int_t opa,
7381  /* Complex */ ae_vector* x,
7382  ae_int_t ix,
7383  /* Complex */ ae_vector* y,
7384  ae_int_t iy,
7385  ae_state *_state);
7387  ae_int_t n,
7388  /* Real */ ae_matrix* a,
7389  ae_int_t ia,
7390  ae_int_t ja,
7391  ae_int_t opa,
7392  /* Real */ ae_vector* x,
7393  ae_int_t ix,
7394  /* Real */ ae_vector* y,
7395  ae_int_t iy,
7396  ae_state *_state);
7398  ae_int_t n,
7399  /* Complex */ ae_matrix* a,
7400  ae_int_t i1,
7401  ae_int_t j1,
7402  ae_bool isupper,
7403  ae_bool isunit,
7404  ae_int_t optype,
7405  /* Complex */ ae_matrix* x,
7406  ae_int_t i2,
7407  ae_int_t j2,
7408  ae_state *_state);
7410  ae_int_t n,
7411  /* Complex */ ae_matrix* a,
7412  ae_int_t i1,
7413  ae_int_t j1,
7414  ae_bool isupper,
7415  ae_bool isunit,
7416  ae_int_t optype,
7417  /* Complex */ ae_matrix* x,
7418  ae_int_t i2,
7419  ae_int_t j2, ae_state *_state);
7421  ae_int_t n,
7422  /* Complex */ ae_matrix* a,
7423  ae_int_t i1,
7424  ae_int_t j1,
7425  ae_bool isupper,
7426  ae_bool isunit,
7427  ae_int_t optype,
7428  /* Complex */ ae_matrix* x,
7429  ae_int_t i2,
7430  ae_int_t j2,
7431  ae_state *_state);
7433  ae_int_t n,
7434  /* Complex */ ae_matrix* a,
7435  ae_int_t i1,
7436  ae_int_t j1,
7437  ae_bool isupper,
7438  ae_bool isunit,
7439  ae_int_t optype,
7440  /* Complex */ ae_matrix* x,
7441  ae_int_t i2,
7442  ae_int_t j2, ae_state *_state);
7444  ae_int_t n,
7445  /* Real */ ae_matrix* a,
7446  ae_int_t i1,
7447  ae_int_t j1,
7448  ae_bool isupper,
7449  ae_bool isunit,
7450  ae_int_t optype,
7451  /* Real */ ae_matrix* x,
7452  ae_int_t i2,
7453  ae_int_t j2,
7454  ae_state *_state);
7456  ae_int_t n,
7457  /* Real */ ae_matrix* a,
7458  ae_int_t i1,
7459  ae_int_t j1,
7460  ae_bool isupper,
7461  ae_bool isunit,
7462  ae_int_t optype,
7463  /* Real */ ae_matrix* x,
7464  ae_int_t i2,
7465  ae_int_t j2, ae_state *_state);
7467  ae_int_t n,
7468  /* Real */ ae_matrix* a,
7469  ae_int_t i1,
7470  ae_int_t j1,
7471  ae_bool isupper,
7472  ae_bool isunit,
7473  ae_int_t optype,
7474  /* Real */ ae_matrix* x,
7475  ae_int_t i2,
7476  ae_int_t j2,
7477  ae_state *_state);
7479  ae_int_t n,
7480  /* Real */ ae_matrix* a,
7481  ae_int_t i1,
7482  ae_int_t j1,
7483  ae_bool isupper,
7484  ae_bool isunit,
7485  ae_int_t optype,
7486  /* Real */ ae_matrix* x,
7487  ae_int_t i2,
7488  ae_int_t j2, ae_state *_state);
7490  ae_int_t k,
7491  double alpha,
7492  /* Complex */ ae_matrix* a,
7493  ae_int_t ia,
7494  ae_int_t ja,
7495  ae_int_t optypea,
7496  double beta,
7497  /* Complex */ ae_matrix* c,
7498  ae_int_t ic,
7499  ae_int_t jc,
7500  ae_bool isupper,
7501  ae_state *_state);
7503  ae_int_t k,
7504  double alpha,
7505  /* Complex */ ae_matrix* a,
7506  ae_int_t ia,
7507  ae_int_t ja,
7508  ae_int_t optypea,
7509  double beta,
7510  /* Complex */ ae_matrix* c,
7511  ae_int_t ic,
7512  ae_int_t jc,
7513  ae_bool isupper, ae_state *_state);
7515  ae_int_t k,
7516  double alpha,
7517  /* Real */ ae_matrix* a,
7518  ae_int_t ia,
7519  ae_int_t ja,
7520  ae_int_t optypea,
7521  double beta,
7522  /* Real */ ae_matrix* c,
7523  ae_int_t ic,
7524  ae_int_t jc,
7525  ae_bool isupper,
7526  ae_state *_state);
7528  ae_int_t k,
7529  double alpha,
7530  /* Real */ ae_matrix* a,
7531  ae_int_t ia,
7532  ae_int_t ja,
7533  ae_int_t optypea,
7534  double beta,
7535  /* Real */ ae_matrix* c,
7536  ae_int_t ic,
7537  ae_int_t jc,
7538  ae_bool isupper, ae_state *_state);
7540  ae_int_t n,
7541  ae_int_t k,
7542  ae_complex alpha,
7543  /* Complex */ ae_matrix* a,
7544  ae_int_t ia,
7545  ae_int_t ja,
7546  ae_int_t optypea,
7547  /* Complex */ ae_matrix* b,
7548  ae_int_t ib,
7549  ae_int_t jb,
7550  ae_int_t optypeb,
7551  ae_complex beta,
7552  /* Complex */ ae_matrix* c,
7553  ae_int_t ic,
7554  ae_int_t jc,
7555  ae_state *_state);
7557  ae_int_t n,
7558  ae_int_t k,
7559  ae_complex alpha,
7560  /* Complex */ ae_matrix* a,
7561  ae_int_t ia,
7562  ae_int_t ja,
7563  ae_int_t optypea,
7564  /* Complex */ ae_matrix* b,
7565  ae_int_t ib,
7566  ae_int_t jb,
7567  ae_int_t optypeb,
7568  ae_complex beta,
7569  /* Complex */ ae_matrix* c,
7570  ae_int_t ic,
7571  ae_int_t jc, ae_state *_state);
7573  ae_int_t n,
7574  ae_int_t k,
7575  double alpha,
7576  /* Real */ ae_matrix* a,
7577  ae_int_t ia,
7578  ae_int_t ja,
7579  ae_int_t optypea,
7580  /* Real */ ae_matrix* b,
7581  ae_int_t ib,
7582  ae_int_t jb,
7583  ae_int_t optypeb,
7584  double beta,
7585  /* Real */ ae_matrix* c,
7586  ae_int_t ic,
7587  ae_int_t jc,
7588  ae_state *_state);
7590  ae_int_t n,
7591  ae_int_t k,
7592  double alpha,
7593  /* Real */ ae_matrix* a,
7594  ae_int_t ia,
7595  ae_int_t ja,
7596  ae_int_t optypea,
7597  /* Real */ ae_matrix* b,
7598  ae_int_t ib,
7599  ae_int_t jb,
7600  ae_int_t optypeb,
7601  double beta,
7602  /* Real */ ae_matrix* c,
7603  ae_int_t ic,
7604  ae_int_t jc, ae_state *_state);
7606  ae_int_t k,
7607  double alpha,
7608  /* Complex */ ae_matrix* a,
7609  ae_int_t ia,
7610  ae_int_t ja,
7611  ae_int_t optypea,
7612  double beta,
7613  /* Complex */ ae_matrix* c,
7614  ae_int_t ic,
7615  ae_int_t jc,
7616  ae_bool isupper,
7617  ae_state *_state);
7619  ae_int_t k,
7620  double alpha,
7621  /* Complex */ ae_matrix* a,
7622  ae_int_t ia,
7623  ae_int_t ja,
7624  ae_int_t optypea,
7625  double beta,
7626  /* Complex */ ae_matrix* c,
7627  ae_int_t ic,
7628  ae_int_t jc,
7629  ae_bool isupper, ae_state *_state);
7630 void rmatrixlu(/* Real */ ae_matrix* a,
7631  ae_int_t m,
7632  ae_int_t n,
7633  /* Integer */ ae_vector* pivots,
7634  ae_state *_state);
7635 void _pexec_rmatrixlu(/* Real */ ae_matrix* a,
7636  ae_int_t m,
7637  ae_int_t n,
7638  /* Integer */ ae_vector* pivots, ae_state *_state);
7639 void cmatrixlu(/* Complex */ ae_matrix* a,
7640  ae_int_t m,
7641  ae_int_t n,
7642  /* Integer */ ae_vector* pivots,
7643  ae_state *_state);
7644 void _pexec_cmatrixlu(/* Complex */ ae_matrix* a,
7645  ae_int_t m,
7646  ae_int_t n,
7647  /* Integer */ ae_vector* pivots, ae_state *_state);
7649  ae_int_t n,
7650  ae_bool isupper,
7651  ae_state *_state);
7653  ae_int_t n,
7654  ae_bool isupper, ae_state *_state);
7656  ae_int_t n,
7657  ae_bool isupper,
7658  ae_state *_state);
7660  ae_int_t n,
7661  ae_bool isupper, ae_state *_state);
7663  ae_int_t n,
7664  ae_bool isupper,
7665  /* Real */ ae_vector* u,
7666  ae_state *_state);
7668  ae_int_t n,
7669  ae_bool isupper,
7670  /* Boolean */ ae_vector* fix,
7671  ae_state *_state);
7673  ae_int_t n,
7674  ae_bool isupper,
7675  /* Real */ ae_vector* u,
7676  /* Real */ ae_vector* bufr,
7677  ae_state *_state);
7679  ae_int_t n,
7680  ae_bool isupper,
7681  /* Boolean */ ae_vector* fix,
7682  /* Real */ ae_vector* bufr,
7683  ae_state *_state);
7685  ae_int_t n,
7686  ae_bool isupper,
7687  ae_state *_state);
7689  ae_int_t n,
7690  ae_bool isupper,
7691  /* Integer */ ae_vector* p0,
7692  /* Integer */ ae_vector* p1,
7693  ae_int_t ordering,
7694  ae_int_t algo,
7695  ae_int_t fmt,
7696  sparsebuffers* buf,
7697  sparsematrix* c,
7698  ae_state *_state);
7699 void rmatrixlup(/* Real */ ae_matrix* a,
7700  ae_int_t m,
7701  ae_int_t n,
7702  /* Integer */ ae_vector* pivots,
7703  ae_state *_state);
7704 void cmatrixlup(/* Complex */ ae_matrix* a,
7705  ae_int_t m,
7706  ae_int_t n,
7707  /* Integer */ ae_vector* pivots,
7708  ae_state *_state);
7709 void rmatrixplu(/* Real */ ae_matrix* a,
7710  ae_int_t m,
7711  ae_int_t n,
7712  /* Integer */ ae_vector* pivots,
7713  ae_state *_state);
7714 void cmatrixplu(/* Complex */ ae_matrix* a,
7715  ae_int_t m,
7716  ae_int_t n,
7717  /* Integer */ ae_vector* pivots,
7718  ae_state *_state);
7720  ae_int_t offs,
7721  ae_int_t n,
7722  ae_bool isupper,
7723  /* Real */ ae_vector* tmp,
7724  ae_state *_state);
7725 double rmatrixrcond1(/* Real */ ae_matrix* a,
7726  ae_int_t n,
7727  ae_state *_state);
7728 double rmatrixrcondinf(/* Real */ ae_matrix* a,
7729  ae_int_t n,
7730  ae_state *_state);
7731 double spdmatrixrcond(/* Real */ ae_matrix* a,
7732  ae_int_t n,
7733  ae_bool isupper,
7734  ae_state *_state);
7735 double rmatrixtrrcond1(/* Real */ ae_matrix* a,
7736  ae_int_t n,
7737  ae_bool isupper,
7738  ae_bool isunit,
7739  ae_state *_state);
7740 double rmatrixtrrcondinf(/* Real */ ae_matrix* a,
7741  ae_int_t n,
7742  ae_bool isupper,
7743  ae_bool isunit,
7744  ae_state *_state);
7745 double hpdmatrixrcond(/* Complex */ ae_matrix* a,
7746  ae_int_t n,
7747  ae_bool isupper,
7748  ae_state *_state);
7749 double cmatrixrcond1(/* Complex */ ae_matrix* a,
7750  ae_int_t n,
7751  ae_state *_state);
7752 double cmatrixrcondinf(/* Complex */ ae_matrix* a,
7753  ae_int_t n,
7754  ae_state *_state);
7755 double rmatrixlurcond1(/* Real */ ae_matrix* lua,
7756  ae_int_t n,
7757  ae_state *_state);
7758 double rmatrixlurcondinf(/* Real */ ae_matrix* lua,
7759  ae_int_t n,
7760  ae_state *_state);
7761 double spdmatrixcholeskyrcond(/* Real */ ae_matrix* a,
7762  ae_int_t n,
7763  ae_bool isupper,
7764  ae_state *_state);
7765 double hpdmatrixcholeskyrcond(/* Complex */ ae_matrix* a,
7766  ae_int_t n,
7767  ae_bool isupper,
7768  ae_state *_state);
7769 double cmatrixlurcond1(/* Complex */ ae_matrix* lua,
7770  ae_int_t n,
7771  ae_state *_state);
7772 double cmatrixlurcondinf(/* Complex */ ae_matrix* lua,
7773  ae_int_t n,
7774  ae_state *_state);
7775 double cmatrixtrrcond1(/* Complex */ ae_matrix* a,
7776  ae_int_t n,
7777  ae_bool isupper,
7778  ae_bool isunit,
7779  ae_state *_state);
7780 double cmatrixtrrcondinf(/* Complex */ ae_matrix* a,
7781  ae_int_t n,
7782  ae_bool isupper,
7783  ae_bool isunit,
7784  ae_state *_state);
7785 double rcondthreshold(ae_state *_state);
7786 void rmatrixluinverse(/* Real */ ae_matrix* a,
7787  /* Integer */ ae_vector* pivots,
7788  ae_int_t n,
7789  ae_int_t* info,
7790  matinvreport* rep,
7791  ae_state *_state);
7793  /* Integer */ ae_vector* pivots,
7794  ae_int_t n,
7795  ae_int_t* info,
7796  matinvreport* rep, ae_state *_state);
7797 void rmatrixinverse(/* Real */ ae_matrix* a,
7798  ae_int_t n,
7799  ae_int_t* info,
7800  matinvreport* rep,
7801  ae_state *_state);
7803  ae_int_t n,
7804  ae_int_t* info,
7805  matinvreport* rep, ae_state *_state);
7806 void cmatrixluinverse(/* Complex */ ae_matrix* a,
7807  /* Integer */ ae_vector* pivots,
7808  ae_int_t n,
7809  ae_int_t* info,
7810  matinvreport* rep,
7811  ae_state *_state);
7812 void _pexec_cmatrixluinverse(/* Complex */ ae_matrix* a,
7813  /* Integer */ ae_vector* pivots,
7814  ae_int_t n,
7815  ae_int_t* info,
7816  matinvreport* rep, ae_state *_state);
7817 void cmatrixinverse(/* Complex */ ae_matrix* a,
7818  ae_int_t n,
7819  ae_int_t* info,
7820  matinvreport* rep,
7821  ae_state *_state);
7822 void _pexec_cmatrixinverse(/* Complex */ ae_matrix* a,
7823  ae_int_t n,
7824  ae_int_t* info,
7825  matinvreport* rep, ae_state *_state);
7827  ae_int_t n,
7828  ae_bool isupper,
7829  ae_int_t* info,
7830  matinvreport* rep,
7831  ae_state *_state);
7833  ae_int_t n,
7834  ae_bool isupper,
7835  ae_int_t* info,
7836  matinvreport* rep, ae_state *_state);
7837 void spdmatrixinverse(/* Real */ ae_matrix* a,
7838  ae_int_t n,
7839  ae_bool isupper,
7840  ae_int_t* info,
7841  matinvreport* rep,
7842  ae_state *_state);
7844  ae_int_t n,
7845  ae_bool isupper,
7846  ae_int_t* info,
7847  matinvreport* rep, ae_state *_state);
7848 void hpdmatrixcholeskyinverse(/* Complex */ ae_matrix* a,
7849  ae_int_t n,
7850  ae_bool isupper,
7851  ae_int_t* info,
7852  matinvreport* rep,
7853  ae_state *_state);
7855  ae_int_t n,
7856  ae_bool isupper,
7857  ae_int_t* info,
7858  matinvreport* rep, ae_state *_state);
7859 void hpdmatrixinverse(/* Complex */ ae_matrix* a,
7860  ae_int_t n,
7861  ae_bool isupper,
7862  ae_int_t* info,
7863  matinvreport* rep,
7864  ae_state *_state);
7865 void _pexec_hpdmatrixinverse(/* Complex */ ae_matrix* a,
7866  ae_int_t n,
7867  ae_bool isupper,
7868  ae_int_t* info,
7869  matinvreport* rep, ae_state *_state);
7870 void rmatrixtrinverse(/* Real */ ae_matrix* a,
7871  ae_int_t n,
7872  ae_bool isupper,
7873  ae_bool isunit,
7874  ae_int_t* info,
7875  matinvreport* rep,
7876  ae_state *_state);
7878  ae_int_t n,
7879  ae_bool isupper,
7880  ae_bool isunit,
7881  ae_int_t* info,
7882  matinvreport* rep, ae_state *_state);
7883 void cmatrixtrinverse(/* Complex */ ae_matrix* a,
7884  ae_int_t n,
7885  ae_bool isupper,
7886  ae_bool isunit,
7887  ae_int_t* info,
7888  matinvreport* rep,
7889  ae_state *_state);
7890 void _pexec_cmatrixtrinverse(/* Complex */ ae_matrix* a,
7891  ae_int_t n,
7892  ae_bool isupper,
7893  ae_bool isunit,
7894  ae_int_t* info,
7895  matinvreport* rep, ae_state *_state);
7897  ae_int_t offs,
7898  ae_int_t n,
7899  ae_bool isupper,
7900  /* Real */ ae_vector* tmp,
7901  ae_state *_state);
7902 void _matinvreport_init(void* _p, ae_state *_state);
7903 void _matinvreport_init_copy(void* _dst, void* _src, ae_state *_state);
7904 void _matinvreport_clear(void* _p);
7905 void _matinvreport_destroy(void* _p);
7906 void rmatrixqr(/* Real */ ae_matrix* a,
7907  ae_int_t m,
7908  ae_int_t n,
7909  /* Real */ ae_vector* tau,
7910  ae_state *_state);
7911 void _pexec_rmatrixqr(/* Real */ ae_matrix* a,
7912  ae_int_t m,
7913  ae_int_t n,
7914  /* Real */ ae_vector* tau, ae_state *_state);
7915 void rmatrixlq(/* Real */ ae_matrix* a,
7916  ae_int_t m,
7917  ae_int_t n,
7918  /* Real */ ae_vector* tau,
7919  ae_state *_state);
7920 void _pexec_rmatrixlq(/* Real */ ae_matrix* a,
7921  ae_int_t m,
7922  ae_int_t n,
7923  /* Real */ ae_vector* tau, ae_state *_state);
7924 void cmatrixqr(/* Complex */ ae_matrix* a,
7925  ae_int_t m,
7926  ae_int_t n,
7927  /* Complex */ ae_vector* tau,
7928  ae_state *_state);
7929 void _pexec_cmatrixqr(/* Complex */ ae_matrix* a,
7930  ae_int_t m,
7931  ae_int_t n,
7932  /* Complex */ ae_vector* tau, ae_state *_state);
7933 void cmatrixlq(/* Complex */ ae_matrix* a,
7934  ae_int_t m,
7935  ae_int_t n,
7936  /* Complex */ ae_vector* tau,
7937  ae_state *_state);
7938 void _pexec_cmatrixlq(/* Complex */ ae_matrix* a,
7939  ae_int_t m,
7940  ae_int_t n,
7941  /* Complex */ ae_vector* tau, ae_state *_state);
7942 void rmatrixqrunpackq(/* Real */ ae_matrix* a,
7943  ae_int_t m,
7944  ae_int_t n,
7945  /* Real */ ae_vector* tau,
7946  ae_int_t qcolumns,
7947  /* Real */ ae_matrix* q,
7948  ae_state *_state);
7950  ae_int_t m,
7951  ae_int_t n,
7952  /* Real */ ae_vector* tau,
7953  ae_int_t qcolumns,
7954  /* Real */ ae_matrix* q, ae_state *_state);
7955 void rmatrixqrunpackr(/* Real */ ae_matrix* a,
7956  ae_int_t m,
7957  ae_int_t n,
7958  /* Real */ ae_matrix* r,
7959  ae_state *_state);
7960 void rmatrixlqunpackq(/* Real */ ae_matrix* a,
7961  ae_int_t m,
7962  ae_int_t n,
7963  /* Real */ ae_vector* tau,
7964  ae_int_t qrows,
7965  /* Real */ ae_matrix* q,
7966  ae_state *_state);
7968  ae_int_t m,
7969  ae_int_t n,
7970  /* Real */ ae_vector* tau,
7971  ae_int_t qrows,
7972  /* Real */ ae_matrix* q, ae_state *_state);
7973 void rmatrixlqunpackl(/* Real */ ae_matrix* a,
7974  ae_int_t m,
7975  ae_int_t n,
7976  /* Real */ ae_matrix* l,
7977  ae_state *_state);
7978 void cmatrixqrunpackq(/* Complex */ ae_matrix* a,
7979  ae_int_t m,
7980  ae_int_t n,
7981  /* Complex */ ae_vector* tau,
7982  ae_int_t qcolumns,
7983  /* Complex */ ae_matrix* q,
7984  ae_state *_state);
7985 void _pexec_cmatrixqrunpackq(/* Complex */ ae_matrix* a,
7986  ae_int_t m,
7987  ae_int_t n,
7988  /* Complex */ ae_vector* tau,
7989  ae_int_t qcolumns,
7990  /* Complex */ ae_matrix* q, ae_state *_state);
7991 void cmatrixqrunpackr(/* Complex */ ae_matrix* a,
7992  ae_int_t m,
7993  ae_int_t n,
7994  /* Complex */ ae_matrix* r,
7995  ae_state *_state);
7996 void cmatrixlqunpackq(/* Complex */ ae_matrix* a,
7997  ae_int_t m,
7998  ae_int_t n,
7999  /* Complex */ ae_vector* tau,
8000  ae_int_t qrows,
8001  /* Complex */ ae_matrix* q,
8002  ae_state *_state);
8003 void _pexec_cmatrixlqunpackq(/* Complex */ ae_matrix* a,
8004  ae_int_t m,
8005  ae_int_t n,
8006  /* Complex */ ae_vector* tau,
8007  ae_int_t qrows,
8008  /* Complex */ ae_matrix* q, ae_state *_state);
8009 void cmatrixlqunpackl(/* Complex */ ae_matrix* a,
8010  ae_int_t m,
8011  ae_int_t n,
8012  /* Complex */ ae_matrix* l,
8013  ae_state *_state);
8014 void rmatrixqrbasecase(/* Real */ ae_matrix* a,
8015  ae_int_t m,
8016  ae_int_t n,
8017  /* Real */ ae_vector* work,
8018  /* Real */ ae_vector* t,
8019  /* Real */ ae_vector* tau,
8020  ae_state *_state);
8021 void rmatrixlqbasecase(/* Real */ ae_matrix* a,
8022  ae_int_t m,
8023  ae_int_t n,
8024  /* Real */ ae_vector* work,
8025  /* Real */ ae_vector* t,
8026  /* Real */ ae_vector* tau,
8027  ae_state *_state);
8028 void rmatrixbd(/* Real */ ae_matrix* a,
8029  ae_int_t m,
8030  ae_int_t n,
8031  /* Real */ ae_vector* tauq,
8032  /* Real */ ae_vector* taup,
8033  ae_state *_state);
8034 void rmatrixbdunpackq(/* Real */ ae_matrix* qp,
8035  ae_int_t m,
8036  ae_int_t n,
8037  /* Real */ ae_vector* tauq,
8038  ae_int_t qcolumns,
8039  /* Real */ ae_matrix* q,
8040  ae_state *_state);
8041 void rmatrixbdmultiplybyq(/* Real */ ae_matrix* qp,
8042  ae_int_t m,
8043  ae_int_t n,
8044  /* Real */ ae_vector* tauq,
8045  /* Real */ ae_matrix* z,
8046  ae_int_t zrows,
8047  ae_int_t zcolumns,
8048  ae_bool fromtheright,
8049  ae_bool dotranspose,
8050  ae_state *_state);
8051 void rmatrixbdunpackpt(/* Real */ ae_matrix* qp,
8052  ae_int_t m,
8053  ae_int_t n,
8054  /* Real */ ae_vector* taup,
8055  ae_int_t ptrows,
8056  /* Real */ ae_matrix* pt,
8057  ae_state *_state);
8058 void rmatrixbdmultiplybyp(/* Real */ ae_matrix* qp,
8059  ae_int_t m,
8060  ae_int_t n,
8061  /* Real */ ae_vector* taup,
8062  /* Real */ ae_matrix* z,
8063  ae_int_t zrows,
8064  ae_int_t zcolumns,
8065  ae_bool fromtheright,
8066  ae_bool dotranspose,
8067  ae_state *_state);
8069  ae_int_t m,
8070  ae_int_t n,
8071  ae_bool* isupper,
8072  /* Real */ ae_vector* d,
8073  /* Real */ ae_vector* e,
8074  ae_state *_state);
8075 void rmatrixhessenberg(/* Real */ ae_matrix* a,
8076  ae_int_t n,
8077  /* Real */ ae_vector* tau,
8078  ae_state *_state);
8080  ae_int_t n,
8081  /* Real */ ae_vector* tau,
8082  /* Real */ ae_matrix* q,
8083  ae_state *_state);
8085  ae_int_t n,
8086  /* Real */ ae_matrix* h,
8087  ae_state *_state);
8088 void smatrixtd(/* Real */ ae_matrix* a,
8089  ae_int_t n,
8090  ae_bool isupper,
8091  /* Real */ ae_vector* tau,
8092  /* Real */ ae_vector* d,
8093  /* Real */ ae_vector* e,
8094  ae_state *_state);
8095 void smatrixtdunpackq(/* Real */ ae_matrix* a,
8096  ae_int_t n,
8097  ae_bool isupper,
8098  /* Real */ ae_vector* tau,
8099  /* Real */ ae_matrix* q,
8100  ae_state *_state);
8101 void hmatrixtd(/* Complex */ ae_matrix* a,
8102  ae_int_t n,
8103  ae_bool isupper,
8104  /* Complex */ ae_vector* tau,
8105  /* Real */ ae_vector* d,
8106  /* Real */ ae_vector* e,
8107  ae_state *_state);
8108 void hmatrixtdunpackq(/* Complex */ ae_matrix* a,
8109  ae_int_t n,
8110  ae_bool isupper,
8111  /* Complex */ ae_vector* tau,
8112  /* Complex */ ae_matrix* q,
8113  ae_state *_state);
8114 void fblscholeskysolve(/* Real */ ae_matrix* cha,
8115  double sqrtscalea,
8116  ae_int_t n,
8117  ae_bool isupper,
8118  /* Real */ ae_vector* xb,
8119  /* Real */ ae_vector* tmp,
8120  ae_state *_state);
8121 void fblssolvecgx(/* Real */ ae_matrix* a,
8122  ae_int_t m,
8123  ae_int_t n,
8124  double alpha,
8125  /* Real */ ae_vector* b,
8126  /* Real */ ae_vector* x,
8127  /* Real */ ae_vector* buf,
8128  ae_state *_state);
8129 void fblscgcreate(/* Real */ ae_vector* x,
8130  /* Real */ ae_vector* b,
8131  ae_int_t n,
8132  fblslincgstate* state,
8133  ae_state *_state);
8135 void fblssolvels(/* Real */ ae_matrix* a,
8136  /* Real */ ae_vector* b,
8137  ae_int_t m,
8138  ae_int_t n,
8139  /* Real */ ae_vector* tmp0,
8140  /* Real */ ae_vector* tmp1,
8141  /* Real */ ae_vector* tmp2,
8142  ae_state *_state);
8143 void _fblslincgstate_init(void* _p, ae_state *_state);
8144 void _fblslincgstate_init_copy(void* _dst, void* _src, ae_state *_state);
8145 void _fblslincgstate_clear(void* _p);
8148  /* Real */ ae_vector* e,
8149  ae_int_t n,
8150  ae_bool isupper,
8151  ae_bool isfractionalaccuracyrequired,
8152  /* Real */ ae_matrix* u,
8153  ae_int_t nru,
8154  /* Real */ ae_matrix* c,
8155  ae_int_t ncc,
8156  /* Real */ ae_matrix* vt,
8157  ae_int_t ncvt,
8158  ae_state *_state);
8160  /* Real */ ae_vector* e,
8161  ae_int_t n,
8162  ae_bool isupper,
8163  ae_bool isfractionalaccuracyrequired,
8164  /* Real */ ae_matrix* u,
8165  ae_int_t nru,
8166  /* Real */ ae_matrix* c,
8167  ae_int_t ncc,
8168  /* Real */ ae_matrix* vt,
8169  ae_int_t ncvt,
8170  ae_state *_state);
8172  ae_int_t m,
8173  ae_int_t n,
8174  ae_int_t uneeded,
8175  ae_int_t vtneeded,
8176  ae_int_t additionalmemory,
8177  /* Real */ ae_vector* w,
8178  /* Real */ ae_matrix* u,
8179  /* Real */ ae_matrix* vt,
8180  ae_state *_state);
8182  ae_int_t m,
8183  ae_int_t n,
8184  ae_int_t uneeded,
8185  ae_int_t vtneeded,
8186  ae_int_t additionalmemory,
8187  /* Real */ ae_vector* w,
8188  /* Real */ ae_matrix* u,
8189  /* Real */ ae_matrix* vt, ae_state *_state);
8191  ae_int_t n,
8192  ae_int_t nstart,
8193  ae_int_t nits,
8194  normestimatorstate* state,
8195  ae_state *_state);
8197  ae_int_t seedval,
8198  ae_state *_state);
8200  ae_state *_state);
8202  sparsematrix* a,
8203  ae_state *_state);
8205  double* nrm,
8206  ae_state *_state);
8208 void _normestimatorstate_init(void* _p, ae_state *_state);
8209 void _normestimatorstate_init_copy(void* _dst, void* _src, ae_state *_state);
8213  ae_int_t k,
8214  eigsubspacestate* state,
8215  ae_state *_state);
8217  ae_int_t k,
8218  eigsubspacestate* state,
8219  ae_state *_state);
8221  double eps,
8222  ae_int_t maxits,
8223  ae_state *_state);
8225  ae_int_t mtype,
8226  ae_state *_state);
8229  ae_int_t* requesttype,
8230  ae_int_t* requestsize,
8231  ae_state *_state);
8233  /* Real */ ae_matrix* x,
8234  ae_state *_state);
8236  /* Real */ ae_matrix* ax,
8237  ae_state *_state);
8239  /* Real */ ae_vector* w,
8240  /* Real */ ae_matrix* z,
8241  eigsubspacereport* rep,
8242  ae_state *_state);
8244  /* Real */ ae_matrix* a,
8245  ae_bool isupper,
8246  /* Real */ ae_vector* w,
8247  /* Real */ ae_matrix* z,
8248  eigsubspacereport* rep,
8249  ae_state *_state);
8251  /* Real */ ae_matrix* a,
8252  ae_bool isupper,
8253  /* Real */ ae_vector* w,
8254  /* Real */ ae_matrix* z,
8255  eigsubspacereport* rep, ae_state *_state);
8257  sparsematrix* a,
8258  ae_bool isupper,
8259  /* Real */ ae_vector* w,
8260  /* Real */ ae_matrix* z,
8261  eigsubspacereport* rep,
8262  ae_state *_state);
8265  ae_int_t n,
8266  ae_int_t zneeded,
8267  ae_bool isupper,
8268  /* Real */ ae_vector* d,
8269  /* Real */ ae_matrix* z,
8270  ae_state *_state);
8272  ae_int_t n,
8273  ae_int_t zneeded,
8274  ae_bool isupper,
8275  double b1,
8276  double b2,
8277  ae_int_t* m,
8278  /* Real */ ae_vector* w,
8279  /* Real */ ae_matrix* z,
8280  ae_state *_state);
8282  ae_int_t n,
8283  ae_int_t zneeded,
8284  ae_bool isupper,
8285  ae_int_t i1,
8286  ae_int_t i2,
8287  /* Real */ ae_vector* w,
8288  /* Real */ ae_matrix* z,
8289  ae_state *_state);
8290 ae_bool hmatrixevd(/* Complex */ ae_matrix* a,
8291  ae_int_t n,
8292  ae_int_t zneeded,
8293  ae_bool isupper,
8294  /* Real */ ae_vector* d,
8295  /* Complex */ ae_matrix* z,
8296  ae_state *_state);
8297 ae_bool hmatrixevdr(/* Complex */ ae_matrix* a,
8298  ae_int_t n,
8299  ae_int_t zneeded,
8300  ae_bool isupper,
8301  double b1,
8302  double b2,
8303  ae_int_t* m,
8304  /* Real */ ae_vector* w,
8305  /* Complex */ ae_matrix* z,
8306  ae_state *_state);
8307 ae_bool hmatrixevdi(/* Complex */ ae_matrix* a,
8308  ae_int_t n,
8309  ae_int_t zneeded,
8310  ae_bool isupper,
8311  ae_int_t i1,
8312  ae_int_t i2,
8313  /* Real */ ae_vector* w,
8314  /* Complex */ ae_matrix* z,
8315  ae_state *_state);
8317  /* Real */ ae_vector* e,
8318  ae_int_t n,
8319  ae_int_t zneeded,
8320  /* Real */ ae_matrix* z,
8321  ae_state *_state);
8323  /* Real */ ae_vector* e,
8324  ae_int_t n,
8325  ae_int_t zneeded,
8326  double a,
8327  double b,
8328  ae_int_t* m,
8329  /* Real */ ae_matrix* z,
8330  ae_state *_state);
8332  /* Real */ ae_vector* e,
8333  ae_int_t n,
8334  ae_int_t zneeded,
8335  ae_int_t i1,
8336  ae_int_t i2,
8337  /* Real */ ae_matrix* z,
8338  ae_state *_state);
8340  ae_int_t n,
8341  ae_int_t vneeded,
8342  /* Real */ ae_vector* wr,
8343  /* Real */ ae_vector* wi,
8344  /* Real */ ae_matrix* vl,
8345  /* Real */ ae_matrix* vr,
8346  ae_state *_state);
8347 void _eigsubspacestate_init(void* _p, ae_state *_state);
8348 void _eigsubspacestate_init_copy(void* _dst, void* _src, ae_state *_state);
8351 void _eigsubspacereport_init(void* _p, ae_state *_state);
8352 void _eigsubspacereport_init_copy(void* _dst, void* _src, ae_state *_state);
8356  ae_int_t n,
8357  /* Real */ ae_matrix* s,
8358  ae_state *_state);
8360  ae_int_t n,
8361  ae_bool isuppera,
8362  /* Real */ ae_matrix* b,
8363  ae_bool isupperb,
8364  ae_int_t zneeded,
8365  ae_int_t problemtype,
8366  /* Real */ ae_vector* d,
8367  /* Real */ ae_matrix* z,
8368  ae_state *_state);
8370  ae_int_t n,
8371  ae_bool isuppera,
8372  /* Real */ ae_matrix* b,
8373  ae_bool isupperb,
8374  ae_int_t problemtype,
8375  /* Real */ ae_matrix* r,
8376  ae_bool* isupperr,
8377  ae_state *_state);
8378 void rmatrixinvupdatesimple(/* Real */ ae_matrix* inva,
8379  ae_int_t n,
8380  ae_int_t updrow,
8381  ae_int_t updcolumn,
8382  double updval,
8383  ae_state *_state);
8384 void rmatrixinvupdaterow(/* Real */ ae_matrix* inva,
8385  ae_int_t n,
8386  ae_int_t updrow,
8387  /* Real */ ae_vector* v,
8388  ae_state *_state);
8389 void rmatrixinvupdatecolumn(/* Real */ ae_matrix* inva,
8390  ae_int_t n,
8391  ae_int_t updcolumn,
8392  /* Real */ ae_vector* u,
8393  ae_state *_state);
8394 void rmatrixinvupdateuv(/* Real */ ae_matrix* inva,
8395  ae_int_t n,
8396  /* Real */ ae_vector* u,
8397  /* Real */ ae_vector* v,
8398  ae_state *_state);
8399 double rmatrixludet(/* Real */ ae_matrix* a,
8400  /* Integer */ ae_vector* pivots,
8401  ae_int_t n,
8402  ae_state *_state);
8403 double rmatrixdet(/* Real */ ae_matrix* a,
8404  ae_int_t n,
8405  ae_state *_state);
8407  /* Integer */ ae_vector* pivots,
8408  ae_int_t n,
8409  ae_state *_state);
8411  ae_int_t n,
8412  ae_state *_state);
8413 double spdmatrixcholeskydet(/* Real */ ae_matrix* a,
8414  ae_int_t n,
8415  ae_state *_state);
8416 double spdmatrixdet(/* Real */ ae_matrix* a,
8417  ae_int_t n,
8418  ae_bool isupper,
8419  ae_state *_state);
8420 
8421 }
8422 #endif
8423 
ae_int_t ablasblocksize(ae_matrix *a, ae_state *_state)
bool smatrixgevd(const real_2d_array &a, const ae_int_t n, const bool isuppera, const real_2d_array &b, const bool isupperb, const ae_int_t zneeded, const ae_int_t problemtype, real_1d_array &d, real_2d_array &z)
ae_bool smatrixtdevdr(ae_vector *d, ae_vector *e, ae_int_t n, ae_int_t zneeded, double a, double b, ae_int_t *m, ae_matrix *z, ae_state *_state)
void _normestimatorstate_init(void *_p, ae_state *_state)
double rmatrixlurcond1(ae_matrix *lua, ae_int_t n, ae_state *_state)
void rmatrixhessenbergunpackh(const real_2d_array &a, const ae_int_t n, real_2d_array &h)
void sparsecopytocrsbuf(const sparsematrix &s0, const sparsematrix &s1)
void cmatrixqr(ae_matrix *a, ae_int_t m, ae_int_t n, ae_vector *tau, ae_state *_state)
alglib_impl::sparsematrix * c_ptr()
void sparsesmm(sparsematrix *s, ae_bool isupper, ae_matrix *a, ae_int_t k, ae_matrix *b, ae_state *_state)
void rmatrixinverse(real_2d_array &a, const ae_int_t n, ae_int_t &info, matinvreport &rep)
ae_bool sparseishash(sparsematrix *s, ae_state *_state)
void rmatrixqrbasecase(ae_matrix *a, ae_int_t m, ae_int_t n, ae_vector *work, ae_vector *t, ae_vector *tau, ae_state *_state)
eigsubspacereport & operator=(const eigsubspacereport &rhs)
double rmatrixludet(ae_matrix *a, ae_vector *pivots, ae_int_t n, ae_state *_state)
void spdmatrixinverse(real_2d_array &a, const ae_int_t n, const bool isupper, ae_int_t &info, matinvreport &rep)
bool smatrixevd(const real_2d_array &a, const ae_int_t n, const ae_int_t zneeded, const bool isupper, real_1d_array &d, real_2d_array &z)
void _pexec_rmatrixlefttrsm(ae_int_t m, ae_int_t n, ae_matrix *a, ae_int_t i1, ae_int_t j1, ae_bool isupper, ae_bool isunit, ae_int_t optype, ae_matrix *x, ae_int_t i2, ae_int_t j2, ae_state *_state)
void hmatrixrndmultiply(complex_2d_array &a, const ae_int_t n)
void cmatrixlqunpackq(const complex_2d_array &a, const ae_int_t m, const ae_int_t n, const complex_1d_array &tau, const ae_int_t qrows, complex_2d_array &q)
void cmatrixrndcond(ae_int_t n, double c, ae_matrix *a, ae_state *_state)
void cmatrixherk(ae_int_t n, ae_int_t k, double alpha, ae_matrix *a, ae_int_t ia, ae_int_t ja, ae_int_t optypea, double beta, ae_matrix *c, ae_int_t ic, ae_int_t jc, ae_bool isupper, ae_state *_state)
bool rmatrixschur(real_2d_array &a, const ae_int_t n, real_2d_array &s)
void rmatrixhessenberg(real_2d_array &a, const ae_int_t n, real_1d_array &tau)
void rmatrixinvupdaterow(real_2d_array &inva, const ae_int_t n, const ae_int_t updrow, const real_1d_array &v)
double hpdmatrixcholeskyrcond(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_state *_state)
void rmatrixbdunpackpt(const real_2d_array &qp, const ae_int_t m, const ae_int_t n, const real_1d_array &taup, const ae_int_t ptrows, real_2d_array &pt)
ae_bool rmatrixsvd(ae_matrix *a, ae_int_t m, ae_int_t n, ae_int_t uneeded, ae_int_t vtneeded, ae_int_t additionalmemory, ae_vector *w, ae_matrix *u, ae_matrix *vt, ae_state *_state)
void sparsemm(const sparsematrix &s, const real_2d_array &a, const ae_int_t k, real_2d_array &b)
void rmatrixqrunpackr(const real_2d_array &a, const ae_int_t m, const ae_int_t n, real_2d_array &r)
void cmatrixsyrk(const ae_int_t n, const ae_int_t k, const double alpha, const complex_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const double beta, const complex_2d_array &c, const ae_int_t ic, const ae_int_t jc, const bool isupper)
void cmatrixsyrk(ae_int_t n, ae_int_t k, double alpha, ae_matrix *a, ae_int_t ia, ae_int_t ja, ae_int_t optypea, double beta, ae_matrix *c, ae_int_t ic, ae_int_t jc, ae_bool isupper, ae_state *_state)
bool hmatrixevdi(const complex_2d_array &a, const ae_int_t n, const ae_int_t zneeded, const bool isupper, const ae_int_t i1, const ae_int_t i2, real_1d_array &w, complex_2d_array &z)
void sparsecreatesksbuf(ae_int_t m, ae_int_t n, ae_vector *d, ae_vector *u, sparsematrix *s, ae_state *_state)
void _pexec_rmatrixlu(ae_matrix *a, ae_int_t m, ae_int_t n, ae_vector *pivots, ae_state *_state)
double rmatrixdet(const real_2d_array &a, const ae_int_t n)
void cmatrixrndorthogonalfromtheright(complex_2d_array &a, const ae_int_t m, const ae_int_t n)
bool smp_spdmatrixcholesky(real_2d_array &a, const ae_int_t n, const bool isupper)
void rmatrixgemm(ae_int_t m, ae_int_t n, ae_int_t k, double alpha, ae_matrix *a, ae_int_t ia, ae_int_t ja, ae_int_t optypea, ae_matrix *b, ae_int_t ib, ae_int_t jb, ae_int_t optypeb, double beta, ae_matrix *c, ae_int_t ic, ae_int_t jc, ae_state *_state)
void _pexec_hpdmatrixinverse(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_int_t *info, matinvreport *rep, ae_state *_state)
void cmatrixrndorthogonalfromtheright(ae_matrix *a, ae_int_t m, ae_int_t n, ae_state *_state)
double rmatrixlurcondinf(const real_2d_array &lua, const ae_int_t n)
ae_bool _pexec_rmatrixsvd(ae_matrix *a, ae_int_t m, ae_int_t n, ae_int_t uneeded, ae_int_t vtneeded, ae_int_t additionalmemory, ae_vector *w, ae_matrix *u, ae_matrix *vt, ae_state *_state)
void _sparsematrix_init(void *_p, ae_state *_state)
void rmatrixlqunpackl(const real_2d_array &a, const ae_int_t m, const ae_int_t n, real_2d_array &l)
void eigsubspacesolvedenses(const eigsubspacestate &state, const real_2d_array &a, const bool isupper, real_1d_array &w, real_2d_array &z, eigsubspacereport &rep)
void smp_cmatrixqr(complex_2d_array &a, const ae_int_t m, const ae_int_t n, complex_1d_array &tau)
ae_int_t sparsegetnrows(const sparsematrix &s)
void smatrixtdunpackq(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_vector *tau, ae_matrix *q, ae_state *_state)
void _pexec_cmatrixinverse(ae_matrix *a, ae_int_t n, ae_int_t *info, matinvreport *rep, ae_state *_state)
void rmatrixrighttrsm(ae_int_t m, ae_int_t n, ae_matrix *a, ae_int_t i1, ae_int_t j1, ae_bool isupper, ae_bool isunit, ae_int_t optype, ae_matrix *x, ae_int_t i2, ae_int_t j2, ae_state *_state)
void _eigsubspacestate_destroy(void *_p)
void rmatrixlefttrsm(ae_int_t m, ae_int_t n, ae_matrix *a, ae_int_t i1, ae_int_t j1, ae_bool isupper, ae_bool isunit, ae_int_t optype, ae_matrix *x, ae_int_t i2, ae_int_t j2, ae_state *_state)
_matinvreport_owner & operator=(const _matinvreport_owner &rhs)
void rmatrixbdmultiplybyp(ae_matrix *qp, ae_int_t m, ae_int_t n, ae_vector *taup, ae_matrix *z, ae_int_t zrows, ae_int_t zcolumns, ae_bool fromtheright, ae_bool dotranspose, ae_state *_state)
alglib_impl::normestimatorstate * p_struct
Definition: linalg.h:296
void cmatrixmv(const ae_int_t m, const ae_int_t n, const complex_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t opa, const complex_1d_array &x, const ae_int_t ix, complex_1d_array &y, const ae_int_t iy)
void spdmatrixcholeskyupdateadd1buf(const real_2d_array &a, const ae_int_t n, const bool isupper, const real_1d_array &u, real_1d_array &bufr)
ae_bool hmatrixevdi(ae_matrix *a, ae_int_t n, ae_int_t zneeded, ae_bool isupper, ae_int_t i1, ae_int_t i2, ae_vector *w, ae_matrix *z, ae_state *_state)
bool rmatrixbdsvd(real_1d_array &d, const real_1d_array &e, const ae_int_t n, const bool isupper, const bool isfractionalaccuracyrequired, real_2d_array &u, const ae_int_t nru, real_2d_array &c, const ae_int_t ncc, real_2d_array &vt, const ae_int_t ncvt)
void cmatrixgemm(const ae_int_t m, const ae_int_t n, const ae_int_t k, const alglib::complex alpha, const complex_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const complex_2d_array &b, const ae_int_t ib, const ae_int_t jb, const ae_int_t optypeb, const alglib::complex beta, const complex_2d_array &c, const ae_int_t ic, const ae_int_t jc)
void smp_cmatrixrighttrsm(const ae_int_t m, const ae_int_t n, const complex_2d_array &a, const ae_int_t i1, const ae_int_t j1, const bool isupper, const bool isunit, const ae_int_t optype, const complex_2d_array &x, const ae_int_t i2, const ae_int_t j2)
ae_bool spdmatrixcholesky(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_state *_state)
void normestimatorcreate(const ae_int_t m, const ae_int_t n, const ae_int_t nstart, const ae_int_t nits, normestimatorstate &state)
void sparsemtv(sparsematrix *s, ae_vector *x, ae_vector *y, ae_state *_state)
void cmatrixluinverse(ae_matrix *a, ae_vector *pivots, ae_int_t n, ae_int_t *info, matinvreport *rep, ae_state *_state)
void sparsecopytosks(const sparsematrix &s0, sparsematrix &s1)
void _normestimatorstate_init_copy(void *_dst, void *_src, ae_state *_state)
double cmatrixlurcondinf(const complex_2d_array &lua, const ae_int_t n)
ae_bool smatrixevdr(ae_matrix *a, ae_int_t n, ae_int_t zneeded, ae_bool isupper, double b1, double b2, ae_int_t *m, ae_vector *w, ae_matrix *z, ae_state *_state)
alglib::complex cmatrixludet(const complex_2d_array &a, const integer_1d_array &pivots, const ae_int_t n)
ae_bool sparseissks(sparsematrix *s, ae_state *_state)
void sparseresizematrix(sparsematrix *s, ae_state *_state)
void cmatrixqrunpackq(const complex_2d_array &a, const ae_int_t m, const ae_int_t n, const complex_1d_array &tau, const ae_int_t qcolumns, complex_2d_array &q)
void cmatrixqrunpackr(ae_matrix *a, ae_int_t m, ae_int_t n, ae_matrix *r, ae_state *_state)
void _sparsematrix_init_copy(void *_dst, void *_src, ae_state *_state)
bool smatrixtdevd(real_1d_array &d, const real_1d_array &e, const ae_int_t n, const ae_int_t zneeded, real_2d_array &z)
void sparsemv(const sparsematrix &s, const real_1d_array &x, real_1d_array &y)
_eigsubspacereport_owner & operator=(const _eigsubspacereport_owner &rhs)
void sparsecopybuf(const sparsematrix &s0, const sparsematrix &s1)
void spdmatrixcholeskyinverse(real_2d_array &a, const ae_int_t n, const bool isupper, ae_int_t &info, matinvreport &rep)
void smatrixtd(real_2d_array &a, const ae_int_t n, const bool isupper, real_1d_array &tau, real_1d_array &d, real_1d_array &e)
void eigsubspaceoocstop(eigsubspacestate *state, ae_vector *w, ae_matrix *z, eigsubspacereport *rep, ae_state *_state)
void sparsesmv(const sparsematrix &s, const bool isupper, const real_1d_array &x, real_1d_array &y)
ae_bool rmatrixschur(ae_matrix *a, ae_int_t n, ae_matrix *s, ae_state *_state)
alglib::complex cmatrixdet(const complex_2d_array &a, const ae_int_t n)
void _pexec_cmatrixqrunpackq(ae_matrix *a, ae_int_t m, ae_int_t n, ae_vector *tau, ae_int_t qcolumns, ae_matrix *q, ae_state *_state)
void eigsubspaceoocstop(const eigsubspacestate &state, real_1d_array &w, real_2d_array &z, eigsubspacereport &rep)
void sparsecreatesks(ae_int_t m, ae_int_t n, ae_vector *d, ae_vector *u, sparsematrix *s, ae_state *_state)
double cmatrixrcondinf(ae_matrix *a, ae_int_t n, ae_state *_state)
void sparsecreate(const ae_int_t m, const ae_int_t n, const ae_int_t k, sparsematrix &s)
void rmatrixhessenbergunpackh(ae_matrix *a, ae_int_t n, ae_matrix *h, ae_state *_state)
void sparsemtm(const sparsematrix &s, const real_2d_array &a, const ae_int_t k, real_2d_array &b)
void sparseresizematrix(const sparsematrix &s)
void rmatrixbdunpackdiagonals(ae_matrix *b, ae_int_t m, ae_int_t n, ae_bool *isupper, ae_vector *d, ae_vector *e, ae_state *_state)
void sparseconverttocrs(const sparsematrix &s)
double cmatrixtrrcond1(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_bool isunit, ae_state *_state)
void fblssolvels(ae_matrix *a, ae_vector *b, ae_int_t m, ae_int_t n, ae_vector *tmp0, ae_vector *tmp1, ae_vector *tmp2, ae_state *_state)
void rmatrixenforcesymmetricity(const real_2d_array &a, const ae_int_t n, const bool isupper)
void rmatrixbd(ae_matrix *a, ae_int_t m, ae_int_t n, ae_vector *tauq, ae_vector *taup, ae_state *_state)
void sparsetrsv(sparsematrix *s, ae_bool isupper, ae_bool isunit, ae_int_t optype, ae_vector *x, ae_state *_state)
alglib_impl::eigsubspacereport * c_ptr()
ae_bool smatrixevd(ae_matrix *a, ae_int_t n, ae_int_t zneeded, ae_bool isupper, ae_vector *d, ae_matrix *z, ae_state *_state)
double rmatrixtrrcond1(const real_2d_array &a, const ae_int_t n, const bool isupper, const bool isunit)
void sparsecreatesks(const ae_int_t m, const ae_int_t n, const integer_1d_array &d, const integer_1d_array &u, sparsematrix &s)
void smatrixrndcond(ae_int_t n, double c, ae_matrix *a, ae_state *_state)
void cmatrixlqunpackl(const complex_2d_array &a, const ae_int_t m, const ae_int_t n, complex_2d_array &l)
void smp_rmatrixsyrk(const ae_int_t n, const ae_int_t k, const double alpha, const real_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const double beta, const real_2d_array &c, const ae_int_t ic, const ae_int_t jc, const bool isupper)
void sparsecopybuf(sparsematrix *s0, sparsematrix *s1, ae_state *_state)
void eigsubspaceoocsendresult(eigsubspacestate *state, ae_matrix *ax, ae_state *_state)
double hpdmatrixcholeskyrcond(const complex_2d_array &a, const ae_int_t n, const bool isupper)
void _pexec_rmatrixgemm(ae_int_t m, ae_int_t n, ae_int_t k, double alpha, ae_matrix *a, ae_int_t ia, ae_int_t ja, ae_int_t optypea, ae_matrix *b, ae_int_t ib, ae_int_t jb, ae_int_t optypeb, double beta, ae_matrix *c, ae_int_t ic, ae_int_t jc, ae_state *_state)
void rmatrixcopy(const ae_int_t m, const ae_int_t n, const real_2d_array &a, const ae_int_t ia, const ae_int_t ja, real_2d_array &b, const ae_int_t ib, const ae_int_t jb)
void smatrixrndmultiply(ae_matrix *a, ae_int_t n, ae_state *_state)
ae_bool smatrixgevdreduce(ae_matrix *a, ae_int_t n, ae_bool isuppera, ae_matrix *b, ae_bool isupperb, ae_int_t problemtype, ae_matrix *r, ae_bool *isupperr, ae_state *_state)
sparsematrix & operator=(const sparsematrix &rhs)
void sparsecopytohashbuf(const sparsematrix &s0, const sparsematrix &s1)
void _pexec_rmatrixtrinverse(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_bool isunit, ae_int_t *info, matinvreport *rep, ae_state *_state)
void _matinvreport_init_copy(void *_dst, void *_src, ae_state *_state)
void rmatrixlq(ae_matrix *a, ae_int_t m, ae_int_t n, ae_vector *tau, ae_state *_state)
void smp_cmatrixlqunpackq(const complex_2d_array &a, const ae_int_t m, const ae_int_t n, const complex_1d_array &tau, const ae_int_t qrows, complex_2d_array &q)
void sparseconverttocrs(sparsematrix *s, ae_state *_state)
ae_int_t sparsegetncols(const sparsematrix &s)
void rmatrixlefttrsm(const ae_int_t m, const ae_int_t n, const real_2d_array &a, const ae_int_t i1, const ae_int_t j1, const bool isupper, const bool isunit, const ae_int_t optype, const real_2d_array &x, const ae_int_t i2, const ae_int_t j2)
void rmatrixrndorthogonalfromtheleft(real_2d_array &a, const ae_int_t m, const ae_int_t n)
void sparsemtm(sparsematrix *s, ae_matrix *a, ae_int_t k, ae_matrix *b, ae_state *_state)
void hmatrixtdunpackq(const complex_2d_array &a, const ae_int_t n, const bool isupper, const complex_1d_array &tau, complex_2d_array &q)
void sparseswap(sparsematrix *s0, sparsematrix *s1, ae_state *_state)
void hmatrixrndcond(const ae_int_t n, const double c, complex_2d_array &a)
void cmatrixrighttrsm(ae_int_t m, ae_int_t n, ae_matrix *a, ae_int_t i1, ae_int_t j1, ae_bool isupper, ae_bool isunit, ae_int_t optype, ae_matrix *x, ae_int_t i2, ae_int_t j2, ae_state *_state)
void smp_rmatrixtrinverse(real_2d_array &a, const ae_int_t n, const bool isupper, const bool isunit, ae_int_t &info, matinvreport &rep)
void _eigsubspacestate_init_copy(void *_dst, void *_src, ae_state *_state)
ae_bool _pexec_hpdmatrixcholesky(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_state *_state)
void rmatrixlu(ae_matrix *a, ae_int_t m, ae_int_t n, ae_vector *pivots, ae_state *_state)
void hmatrixrndmultiply(ae_matrix *a, ae_int_t n, ae_state *_state)
void sparsegetrow(sparsematrix *s, ae_int_t i, ae_vector *irow, ae_state *_state)
double rmatrixtrrcondinf(const real_2d_array &a, const ae_int_t n, const bool isupper, const bool isunit)
void eigsubspacecreatebuf(const ae_int_t n, const ae_int_t k, const eigsubspacestate &state)
void _fblslincgstate_destroy(void *_p)
void rmatrixsyrk(const ae_int_t n, const ae_int_t k, const double alpha, const real_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const double beta, const real_2d_array &c, const ae_int_t ic, const ae_int_t jc, const bool isupper)
void sparsetrsv(const sparsematrix &s, const bool isupper, const bool isunit, const ae_int_t optype, const real_1d_array &x)
ae_bool hpdmatrixcholesky(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_state *_state)
void rmatrixqrunpackr(ae_matrix *a, ae_int_t m, ae_int_t n, ae_matrix *r, ae_state *_state)
void cmatrixgemm(ae_int_t m, ae_int_t n, ae_int_t k, ae_complex alpha, ae_matrix *a, ae_int_t ia, ae_int_t ja, ae_int_t optypea, ae_matrix *b, ae_int_t ib, ae_int_t jb, ae_int_t optypeb, ae_complex beta, ae_matrix *c, ae_int_t ic, ae_int_t jc, ae_state *_state)
void hpdmatrixcholeskyinverse(complex_2d_array &a, const ae_int_t n, const bool isupper, ae_int_t &info, matinvreport &rep)
void sparsecopytosksbuf(sparsematrix *s0, sparsematrix *s1, ae_state *_state)
void cmatrixrndorthogonal(const ae_int_t n, complex_2d_array &a)
void sparsecopy(const sparsematrix &s0, sparsematrix &s1)
void sparsecopytohash(sparsematrix *s0, sparsematrix *s1, ae_state *_state)
alglib_impl::sparsematrix * p_struct
Definition: linalg.h:224
void cmatrixlq(ae_matrix *a, ae_int_t m, ae_int_t n, ae_vector *tau, ae_state *_state)
void rmatrixrndorthogonal(ae_int_t n, ae_matrix *a, ae_state *_state)
void spdmatrixcholeskyupdatefixbuf(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_vector *fix, ae_vector *bufr, ae_state *_state)
void rmatrixqr(real_2d_array &a, const ae_int_t m, const ae_int_t n, real_1d_array &tau)
void fblssolvecgx(ae_matrix *a, ae_int_t m, ae_int_t n, double alpha, ae_vector *b, ae_vector *x, ae_vector *buf, ae_state *_state)
void rmatrixrank1(ae_int_t m, ae_int_t n, ae_matrix *a, ae_int_t ia, ae_int_t ja, ae_vector *u, ae_int_t iu, ae_vector *v, ae_int_t iv, ae_state *_state)
bool smatrixevdr(const real_2d_array &a, const ae_int_t n, const ae_int_t zneeded, const bool isupper, const double b1, const double b2, ae_int_t &m, real_1d_array &w, real_2d_array &z)
alglib_impl::sparsebuffers * c_ptr()
void eigsubspacesolvedenses(eigsubspacestate *state, ae_matrix *a, ae_bool isupper, ae_vector *w, ae_matrix *z, eigsubspacereport *rep, ae_state *_state)
void sparsecreatecrs(const ae_int_t m, const ae_int_t n, const integer_1d_array &ner, sparsematrix &s)
void spdmatrixcholeskyupdatefixbuf(const real_2d_array &a, const ae_int_t n, const bool isupper, const boolean_1d_array &fix, real_1d_array &bufr)
void eigsubspaceoocsendresult(const eigsubspacestate &state, const real_2d_array &ax)
void rmatrixrank1(const ae_int_t m, const ae_int_t n, real_2d_array &a, const ae_int_t ia, const ae_int_t ja, real_1d_array &u, const ae_int_t iu, real_1d_array &v, const ae_int_t iv)
double cmatrixtrrcond1(const complex_2d_array &a, const ae_int_t n, const bool isupper, const bool isunit)
void smp_rmatrixlefttrsm(const ae_int_t m, const ae_int_t n, const real_2d_array &a, const ae_int_t i1, const ae_int_t j1, const bool isupper, const bool isunit, const ae_int_t optype, const real_2d_array &x, const ae_int_t i2, const ae_int_t j2)
void _pexec_cmatrixlq(ae_matrix *a, ae_int_t m, ae_int_t n, ae_vector *tau, ae_state *_state)
void rmatrixinvupdatesimple(real_2d_array &inva, const ae_int_t n, const ae_int_t updrow, const ae_int_t updcolumn, const double updval)
void rmatrixbd(real_2d_array &a, const ae_int_t m, const ae_int_t n, real_1d_array &tauq, real_1d_array &taup)
void smp_hpdmatrixinverse(complex_2d_array &a, const ae_int_t n, const bool isupper, ae_int_t &info, matinvreport &rep)
_eigsubspacestate_owner & operator=(const _eigsubspacestate_owner &rhs)
double sparseget(const sparsematrix &s, const ae_int_t i, const ae_int_t j)
void spdmatrixrndcond(const ae_int_t n, const double c, real_2d_array &a)
void cmatrixtranspose(ae_int_t m, ae_int_t n, ae_matrix *a, ae_int_t ia, ae_int_t ja, ae_matrix *b, ae_int_t ib, ae_int_t jb, ae_state *_state)
void spdmatrixcholeskyupdatefix(const real_2d_array &a, const ae_int_t n, const bool isupper, const boolean_1d_array &fix)
void rmatrixlq(real_2d_array &a, const ae_int_t m, const ae_int_t n, real_1d_array &tau)
void hpdmatrixinverse(complex_2d_array &a, const ae_int_t n, const bool isupper, ae_int_t &info, matinvreport &rep)
double cmatrixlurcondinf(ae_matrix *lua, ae_int_t n, ae_state *_state)
void sparseconverttosks(const sparsematrix &s)
ae_complex cmatrixdet(ae_matrix *a, ae_int_t n, ae_state *_state)
void eigsubspacecreate(ae_int_t n, ae_int_t k, eigsubspacestate *state, ae_state *_state)
ae_bool eigsubspaceiteration(eigsubspacestate *state, ae_state *_state)
void cmatrixlqunpackl(ae_matrix *a, ae_int_t m, ae_int_t n, ae_matrix *l, ae_state *_state)
void rmatrixlqunpackl(ae_matrix *a, ae_int_t m, ae_int_t n, ae_matrix *l, ae_state *_state)
void sparsemv2(sparsematrix *s, ae_vector *x, ae_vector *y0, ae_vector *y1, ae_state *_state)
void rmatrixtrinverse(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_bool isunit, ae_int_t *info, matinvreport *rep, ae_state *_state)
ae_bool spdmatrixcholeskyrec(ae_matrix *a, ae_int_t offs, ae_int_t n, ae_bool isupper, ae_vector *tmp, ae_state *_state)
_sparsematrix_owner & operator=(const _sparsematrix_owner &rhs)
void rmatrixlu(real_2d_array &a, const ae_int_t m, const ae_int_t n, integer_1d_array &pivots)
bool smatrixtdevdi(real_1d_array &d, const real_1d_array &e, const ae_int_t n, const ae_int_t zneeded, const ae_int_t i1, const ae_int_t i2, real_2d_array &z)
void smp_cmatrixlq(complex_2d_array &a, const ae_int_t m, const ae_int_t n, complex_1d_array &tau)
matinvreport(const matinvreport &rhs)
void smp_rmatrixlu(real_2d_array &a, const ae_int_t m, const ae_int_t n, integer_1d_array &pivots)
void cmatrixtrinverse(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_bool isunit, ae_int_t *info, matinvreport *rep, ae_state *_state)
void sparseconverttosks(sparsematrix *s, ae_state *_state)
double hpdmatrixrcond(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_state *_state)
void eigsubspacesolvesparses(eigsubspacestate *state, sparsematrix *a, ae_bool isupper, ae_vector *w, ae_matrix *z, eigsubspacereport *rep, ae_state *_state)
#define ae_bool
Definition: ap.h:193
void ablascomplexsplitlength(ae_matrix *a, ae_int_t n, ae_int_t *n1, ae_int_t *n2, ae_state *_state)
eigsubspacereport(const eigsubspacereport &rhs)
void rmatrixhessenberg(ae_matrix *a, ae_int_t n, ae_vector *tau, ae_state *_state)
ae_int_t sparsegetnrows(sparsematrix *s, ae_state *_state)
void cmatrixqrunpackq(ae_matrix *a, ae_int_t m, ae_int_t n, ae_vector *tau, ae_int_t qcolumns, ae_matrix *q, ae_state *_state)
void sparsecopytosks(sparsematrix *s0, sparsematrix *s1, ae_state *_state)
void smatrixtdunpackq(const real_2d_array &a, const ae_int_t n, const bool isupper, const real_1d_array &tau, real_2d_array &q)
ae_int_t & iterationscount
Definition: linalg.h:356
void _matinvreport_init(void *_p, ae_state *_state)
void rmatrixqrunpackq(ae_matrix *a, ae_int_t m, ae_int_t n, ae_vector *tau, ae_int_t qcolumns, ae_matrix *q, ae_state *_state)
bool sparsecholeskyskyline(const sparsematrix &a, const ae_int_t n, const bool isupper)
void normestimatorresults(normestimatorstate *state, double *nrm, ae_state *_state)
void sparseconverttohash(const sparsematrix &s)
double spdmatrixdet(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_state *_state)
void hmatrixtd(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_vector *tau, ae_vector *d, ae_vector *e, ae_state *_state)
void _pexec_cmatrixrighttrsm(ae_int_t m, ae_int_t n, ae_matrix *a, ae_int_t i1, ae_int_t j1, ae_bool isupper, ae_bool isunit, ae_int_t optype, ae_matrix *x, ae_int_t i2, ae_int_t j2, ae_state *_state)
ae_int_t sparsegetmatrixtype(const sparsematrix &s)
void cmatrixlq(complex_2d_array &a, const ae_int_t m, const ae_int_t n, complex_1d_array &tau)
bool spdmatrixcholesky(real_2d_array &a, const ae_int_t n, const bool isupper)
void sparseconvertto(const sparsematrix &s0, const ae_int_t fmt)
void _pexec_rmatrixsyrk(ae_int_t n, ae_int_t k, double alpha, ae_matrix *a, ae_int_t ia, ae_int_t ja, ae_int_t optypea, double beta, ae_matrix *c, ae_int_t ic, ae_int_t jc, ae_bool isupper, ae_state *_state)
void smatrixrndmultiply(real_2d_array &a, const ae_int_t n)
ae_int_t sparsegetmatrixtype(sparsematrix *s, ae_state *_state)
double spdmatrixdet(const real_2d_array &a, const ae_int_t n, const bool isupper)
void _fblslincgstate_init_copy(void *_dst, void *_src, ae_state *_state)
void rmatrixhessenbergunpackq(ae_matrix *a, ae_int_t n, ae_vector *tau, ae_matrix *q, ae_state *_state)
void _pexec_eigsubspacesolvedenses(eigsubspacestate *state, ae_matrix *a, ae_bool isupper, ae_vector *w, ae_matrix *z, eigsubspacereport *rep, ae_state *_state)
void rmatrixinvupdatecolumn(real_2d_array &inva, const ae_int_t n, const ae_int_t updcolumn, const real_1d_array &u)
void rmatrixbdunpackq(ae_matrix *qp, ae_int_t m, ae_int_t n, ae_vector *tauq, ae_int_t qcolumns, ae_matrix *q, ae_state *_state)
ae_bool fblscgiteration(fblslincgstate *state, ae_state *_state)
void rmatrixlqunpackq(const real_2d_array &a, const ae_int_t m, const ae_int_t n, const real_1d_array &tau, const ae_int_t qrows, real_2d_array &q)
eigsubspacestate(const eigsubspacestate &rhs)
normestimatorstate & operator=(const normestimatorstate &rhs)
void _pexec_rmatrixrighttrsm(ae_int_t m, ae_int_t n, ae_matrix *a, ae_int_t i1, ae_int_t j1, ae_bool isupper, ae_bool isunit, ae_int_t optype, ae_matrix *x, ae_int_t i2, ae_int_t j2, ae_state *_state)
void _sparsebuffers_clear(void *_p)
void sparsecreatecrsbuf(const ae_int_t m, const ae_int_t n, const integer_1d_array &ner, const sparsematrix &s)
void sparseconverttohash(sparsematrix *s, ae_state *_state)
bool rmatrixsvd(const real_2d_array &a, const ae_int_t m, const ae_int_t n, const ae_int_t uneeded, const ae_int_t vtneeded, const ae_int_t additionalmemory, real_1d_array &w, real_2d_array &u, real_2d_array &vt)
void smp_cmatrixlu(complex_2d_array &a, const ae_int_t m, const ae_int_t n, integer_1d_array &pivots)
void normestimatorsetseed(normestimatorstate *state, ae_int_t seedval, ae_state *_state)
bool smp_hpdmatrixcholesky(complex_2d_array &a, const ae_int_t n, const bool isupper)
void rmatrixtranspose(const ae_int_t m, const ae_int_t n, const real_2d_array &a, const ae_int_t ia, const ae_int_t ja, const real_2d_array &b, const ae_int_t ib, const ae_int_t jb)
void cmatrixrndorthogonal(ae_int_t n, ae_matrix *a, ae_state *_state)
void hpdmatrixinverse(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_int_t *info, matinvreport *rep, ae_state *_state)
void _pexec_hpdmatrixcholeskyinverse(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_int_t *info, matinvreport *rep, ae_state *_state)
void sparsecopytobuf(sparsematrix *s0, ae_int_t fmt, sparsematrix *s1, ae_state *_state)
void sparsetrmv(sparsematrix *s, ae_bool isupper, ae_bool isunit, ae_int_t optype, ae_vector *x, ae_vector *y, ae_state *_state)
void sparsecreatesksbuf(const ae_int_t m, const ae_int_t n, const integer_1d_array &d, const integer_1d_array &u, const sparsematrix &s)
void rmatrixinvupdatesimple(ae_matrix *inva, ae_int_t n, ae_int_t updrow, ae_int_t updcolumn, double updval, ae_state *_state)
ae_int_t sparsegetlowercount(sparsematrix *s, ae_state *_state)
void hpdmatrixrndcond(const ae_int_t n, const double c, complex_2d_array &a)
void cmatrixlqunpackq(ae_matrix *a, ae_int_t m, ae_int_t n, ae_vector *tau, ae_int_t qrows, ae_matrix *q, ae_state *_state)
bool hpdmatrixcholesky(complex_2d_array &a, const ae_int_t n, const bool isupper)
void _sparsematrix_clear(void *_p)
void _matinvreport_clear(void *_p)
double rmatrixludet(const real_2d_array &a, const integer_1d_array &pivots, const ae_int_t n)
ae_complex cmatrixludet(ae_matrix *a, ae_vector *pivots, ae_int_t n, ae_state *_state)
void sparsetransposesks(sparsematrix *s, ae_state *_state)
void cmatrixherk(const ae_int_t n, const ae_int_t k, const double alpha, const complex_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const double beta, const complex_2d_array &c, const ae_int_t ic, const ae_int_t jc, const bool isupper)
struct alglib_impl::ae_vector ae_vector
sparsebuffers & operator=(const sparsebuffers &rhs)
void _sparsebuffers_init(void *_p, ae_state *_state)
void normestimatorestimatesparse(const normestimatorstate &state, const sparsematrix &a)
bool sparseissks(const sparsematrix &s)
sparsematrix(const sparsematrix &rhs)
void rmatrixrndcond(ae_int_t n, double c, ae_matrix *a, ae_state *_state)
_normestimatorstate_owner & operator=(const _normestimatorstate_owner &rhs)
void sparsetrmv(const sparsematrix &s, const bool isupper, const bool isunit, const ae_int_t optype, const real_1d_array &x, real_1d_array &y)
void cmatrixluinverse(complex_2d_array &a, const integer_1d_array &pivots, const ae_int_t n, ae_int_t &info, matinvreport &rep)
ae_int_t sparsegetuppercount(const sparsematrix &s)
void sparsecopytocrsbuf(sparsematrix *s0, sparsematrix *s1, ae_state *_state)
void smp_rmatrixluinverse(real_2d_array &a, const integer_1d_array &pivots, const ae_int_t n, ae_int_t &info, matinvreport &rep)
void rmatrixinvupdaterow(ae_matrix *inva, ae_int_t n, ae_int_t updrow, ae_vector *v, ae_state *_state)
void smp_cmatrixgemm(const ae_int_t m, const ae_int_t n, const ae_int_t k, const alglib::complex alpha, const complex_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const complex_2d_array &b, const ae_int_t ib, const ae_int_t jb, const ae_int_t optypeb, const alglib::complex beta, const complex_2d_array &c, const ae_int_t ic, const ae_int_t jc)
void _pexec_cmatrixsyrk(ae_int_t n, ae_int_t k, double alpha, ae_matrix *a, ae_int_t ia, ae_int_t ja, ae_int_t optypea, double beta, ae_matrix *c, ae_int_t ic, ae_int_t jc, ae_bool isupper, ae_state *_state)
void _pexec_rmatrixinverse(ae_matrix *a, ae_int_t n, ae_int_t *info, matinvreport *rep, ae_state *_state)
void _pexec_rmatrixluinverse(ae_matrix *a, ae_vector *pivots, ae_int_t n, ae_int_t *info, matinvreport *rep, ae_state *_state)
void sparsesmv(sparsematrix *s, ae_bool isupper, ae_vector *x, ae_vector *y, ae_state *_state)
void rmatrixinvupdateuv(real_2d_array &inva, const ae_int_t n, const real_1d_array &u, const real_1d_array &v)
sparsebuffers(const sparsebuffers &rhs)
ae_bool rmatrixevd(ae_matrix *a, ae_int_t n, ae_int_t vneeded, ae_vector *wr, ae_vector *wi, ae_matrix *vl, ae_matrix *vr, ae_state *_state)
void sparsemtv(const sparsematrix &s, const real_1d_array &x, real_1d_array &y)
void cmatrixinverse(complex_2d_array &a, const ae_int_t n, ae_int_t &info, matinvreport &rep)
void spdmatrixcholeskyinverserec(ae_matrix *a, ae_int_t offs, ae_int_t n, ae_bool isupper, ae_vector *tmp, ae_state *_state)
void sparsecopytohash(const sparsematrix &s0, sparsematrix &s1)
void rmatrixlqbasecase(ae_matrix *a, ae_int_t m, ae_int_t n, ae_vector *work, ae_vector *t, ae_vector *tau, ae_state *_state)
bool sparseiscrs(const sparsematrix &s)
double cmatrixlurcond1(const complex_2d_array &lua, const ae_int_t n)
void _eigsubspacereport_destroy(void *_p)
void _fblslincgstate_init(void *_p, ae_state *_state)
void rmatrixcopy(ae_int_t m, ae_int_t n, ae_matrix *a, ae_int_t ia, ae_int_t ja, ae_matrix *b, ae_int_t ib, ae_int_t jb, ae_state *_state)
void cmatrixlefttrsm(ae_int_t m, ae_int_t n, ae_matrix *a, ae_int_t i1, ae_int_t j1, ae_bool isupper, ae_bool isunit, ae_int_t optype, ae_matrix *x, ae_int_t i2, ae_int_t j2, ae_state *_state)
void _pexec_cmatrixtrinverse(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_bool isunit, ae_int_t *info, matinvreport *rep, ae_state *_state)
void spdmatrixcholeskyupdatefix(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_vector *fix, ae_state *_state)
ae_bool sparseiscrs(sparsematrix *s, ae_state *_state)
double hpdmatrixrcond(const complex_2d_array &a, const ae_int_t n, const bool isupper)
void _sparsebuffers_init_copy(void *_dst, void *_src, ae_state *_state)
void fblscholeskysolve(ae_matrix *cha, double sqrtscalea, ae_int_t n, ae_bool isupper, ae_vector *xb, ae_vector *tmp, ae_state *_state)
void cmatrixinverse(ae_matrix *a, ae_int_t n, ae_int_t *info, matinvreport *rep, ae_state *_state)
void rmatrixbdunpackdiagonals(const real_2d_array &b, const ae_int_t m, const ae_int_t n, bool &isupper, real_1d_array &d, real_1d_array &e)
double cmatrixrcondinf(const complex_2d_array &a, const ae_int_t n)
void smp_rmatrixqr(real_2d_array &a, const ae_int_t m, const ae_int_t n, real_1d_array &tau)
void sparsecopytocrs(sparsematrix *s0, sparsematrix *s1, ae_state *_state)
void _pexec_cmatrixlefttrsm(ae_int_t m, ae_int_t n, ae_matrix *a, ae_int_t i1, ae_int_t j1, ae_bool isupper, ae_bool isunit, ae_int_t optype, ae_matrix *x, ae_int_t i2, ae_int_t j2, ae_state *_state)
eigsubspacestate & operator=(const eigsubspacestate &rhs)
void normestimatorcreate(ae_int_t m, ae_int_t n, ae_int_t nstart, ae_int_t nits, normestimatorstate *state, ae_state *_state)
void cmatrixrighttrsm(const ae_int_t m, const ae_int_t n, const complex_2d_array &a, const ae_int_t i1, const ae_int_t j1, const bool isupper, const bool isunit, const ae_int_t optype, const complex_2d_array &x, const ae_int_t i2, const ae_int_t j2)
void rmatrixrndorthogonalfromtheright(real_2d_array &a, const ae_int_t m, const ae_int_t n)
void rmatrixmv(const ae_int_t m, const ae_int_t n, const real_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t opa, const real_1d_array &x, const ae_int_t ix, real_1d_array &y, const ae_int_t iy)
double rmatrixtrrcond1(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_bool isunit, ae_state *_state)
ae_bool sparsecholeskyx(sparsematrix *a, ae_int_t n, ae_bool isupper, ae_vector *p0, ae_vector *p1, ae_int_t ordering, ae_int_t algo, ae_int_t fmt, sparsebuffers *buf, sparsematrix *c, ae_state *_state)
void cmatrixlu(complex_2d_array &a, const ae_int_t m, const ae_int_t n, integer_1d_array &pivots)
double rmatrixrcond1(const real_2d_array &a, const ae_int_t n)
alglib_impl::eigsubspacestate * p_struct
Definition: linalg.h:323
ae_int_t sparsegetncols(sparsematrix *s, ae_state *_state)
void cmatrixmv(ae_int_t m, ae_int_t n, ae_matrix *a, ae_int_t ia, ae_int_t ja, ae_int_t opa, ae_vector *x, ae_int_t ix, ae_vector *y, ae_int_t iy, ae_state *_state)
void smatrixrndcond(const ae_int_t n, const double c, real_2d_array &a)
double rmatrixlurcondinf(ae_matrix *lua, ae_int_t n, ae_state *_state)
void rmatrixinverse(ae_matrix *a, ae_int_t n, ae_int_t *info, matinvreport *rep, ae_state *_state)
ae_bool eigsubspaceooccontinue(eigsubspacestate *state, ae_state *_state)
void _eigsubspacereport_init_copy(void *_dst, void *_src, ae_state *_state)
void smp_rmatrixlq(real_2d_array &a, const ae_int_t m, const ae_int_t n, real_1d_array &tau)
void hmatrixrndcond(ae_int_t n, double c, ae_matrix *a, ae_state *_state)
void normestimatorrestart(normestimatorstate *state, ae_state *_state)
void rmatrixbdmultiplybyq(ae_matrix *qp, ae_int_t m, ae_int_t n, ae_vector *tauq, ae_matrix *z, ae_int_t zrows, ae_int_t zcolumns, ae_bool fromtheright, ae_bool dotranspose, ae_state *_state)
void sparseswap(const sparsematrix &s0, const sparsematrix &s1)
_sparsebuffers_owner & operator=(const _sparsebuffers_owner &rhs)
void cmatrixrndorthogonalfromtheleft(complex_2d_array &a, const ae_int_t m, const ae_int_t n)
void ablassplitlength(ae_matrix *a, ae_int_t n, ae_int_t *n1, ae_int_t *n2, ae_state *_state)
double cmatrixrcond1(const complex_2d_array &a, const ae_int_t n)
void rmatrixhessenbergunpackq(const real_2d_array &a, const ae_int_t n, const real_1d_array &tau, real_2d_array &q)
void smp_spdmatrixinverse(real_2d_array &a, const ae_int_t n, const bool isupper, ae_int_t &info, matinvreport &rep)
void sparsecopytosksbuf(const sparsematrix &s0, const sparsematrix &s1)
void sparsegetcompressedrow(const sparsematrix &s, const ae_int_t i, integer_1d_array &colidx, real_1d_array &vals, ae_int_t &nzcnt)
void _sparsebuffers_destroy(void *_p)
void _pexec_cmatrixluinverse(ae_matrix *a, ae_vector *pivots, ae_int_t n, ae_int_t *info, matinvreport *rep, ae_state *_state)
void cmatrixlu(ae_matrix *a, ae_int_t m, ae_int_t n, ae_vector *pivots, ae_state *_state)
void hmatrixtd(complex_2d_array &a, const ae_int_t n, const bool isupper, complex_1d_array &tau, real_1d_array &d, real_1d_array &e)
alglib_impl::ae_int_t ae_int_t
Definition: ap.h:965
ae_int_t sparsegetuppercount(sparsematrix *s, ae_state *_state)
void _fblslincgstate_clear(void *_p)
void smp_cmatrixtrinverse(complex_2d_array &a, const ae_int_t n, const bool isupper, const bool isunit, ae_int_t &info, matinvreport &rep)
void hpdmatrixrndcond(ae_int_t n, double c, ae_matrix *a, ae_state *_state)
void smp_cmatrixsyrk(const ae_int_t n, const ae_int_t k, const double alpha, const complex_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const double beta, const complex_2d_array &c, const ae_int_t ic, const ae_int_t jc, const bool isupper)
void sparsecopytobuf(const sparsematrix &s0, const ae_int_t fmt, const sparsematrix &s1)
double spdmatrixcholeskydet(const real_2d_array &a, const ae_int_t n)
void rmatrixplu(ae_matrix *a, ae_int_t m, ae_int_t n, ae_vector *pivots, ae_state *_state)
void rmatrixlqunpackq(ae_matrix *a, ae_int_t m, ae_int_t n, ae_vector *tau, ae_int_t qrows, ae_matrix *q, ae_state *_state)
double rmatrixrcondinf(ae_matrix *a, ae_int_t n, ae_state *_state)
void rmatrixrndcond(const ae_int_t n, const double c, real_2d_array &a)
void eigsubspacesolvesparses(const eigsubspacestate &state, const sparsematrix &a, const bool isupper, real_1d_array &w, real_2d_array &z, eigsubspacereport &rep)
void rmatrixluinverse(real_2d_array &a, const integer_1d_array &pivots, const ae_int_t n, ae_int_t &info, matinvreport &rep)
ae_bool sparsecholeskyskyline(sparsematrix *a, ae_int_t n, ae_bool isupper, ae_state *_state)
double sparsegetaveragelengthofchain(sparsematrix *s, ae_state *_state)
void rmatrixenforcesymmetricity(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_state *_state)
void sparsecopytohashbuf(sparsematrix *s0, sparsematrix *s1, ae_state *_state)
ae_bool _pexec_spdmatrixcholesky(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_state *_state)
void sparsegetcompressedrow(sparsematrix *s, ae_int_t i, ae_vector *colidx, ae_vector *vals, ae_int_t *nzcnt, ae_state *_state)
void sparsecreatecrs(ae_int_t m, ae_int_t n, ae_vector *ner, sparsematrix *s, ae_state *_state)
void sparsefree(sparsematrix &s)
void _pexec_rmatrixlq(ae_matrix *a, ae_int_t m, ae_int_t n, ae_vector *tau, ae_state *_state)
void smp_cmatrixherk(const ae_int_t n, const ae_int_t k, const double alpha, const complex_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const double beta, const complex_2d_array &c, const ae_int_t ic, const ae_int_t jc, const bool isupper)
void rmatrixbdunpackpt(ae_matrix *qp, ae_int_t m, ae_int_t n, ae_vector *taup, ae_int_t ptrows, ae_matrix *pt, ae_state *_state)
void cmatrixplu(ae_matrix *a, ae_int_t m, ae_int_t n, ae_vector *pivots, ae_state *_state)
void rmatrixluinverse(ae_matrix *a, ae_vector *pivots, ae_int_t n, ae_int_t *info, matinvreport *rep, ae_state *_state)
void sparsemm2(const sparsematrix &s, const real_2d_array &a, const ae_int_t k, real_2d_array &b0, real_2d_array &b1)
void smp_spdmatrixcholeskyinverse(real_2d_array &a, const ae_int_t n, const bool isupper, ae_int_t &info, matinvreport &rep)
double sparsegetdiagonal(sparsematrix *s, ae_int_t i, ae_state *_state)
void sparsemv2(const sparsematrix &s, const real_1d_array &x, real_1d_array &y0, real_1d_array &y1)
void _pexec_cmatrixlu(ae_matrix *a, ae_int_t m, ae_int_t n, ae_vector *pivots, ae_state *_state)
void sparsetransposesks(const sparsematrix &s)
void normestimatorestimatesparse(normestimatorstate *state, sparsematrix *a, ae_state *_state)
void _eigsubspacereport_init(void *_p, ae_state *_state)
double beta(const double a, const double b)
void eigsubspacesetcond(const eigsubspacestate &state, const double eps, const ae_int_t maxits)
ae_bool normestimatoriteration(normestimatorstate *state, ae_state *_state)
void _matinvreport_destroy(void *_p)
void sparsefree(sparsematrix *s, ae_state *_state)
alglib_impl::matinvreport * c_ptr()
void rmatrixtrinverse(real_2d_array &a, const ae_int_t n, const bool isupper, const bool isunit, ae_int_t &info, matinvreport &rep)
void smp_cmatrixlefttrsm(const ae_int_t m, const ae_int_t n, const complex_2d_array &a, const ae_int_t i1, const ae_int_t j1, const bool isupper, const bool isunit, const ae_int_t optype, const complex_2d_array &x, const ae_int_t i2, const ae_int_t j2)
void cmatrixcopy(ae_int_t m, ae_int_t n, ae_matrix *a, ae_int_t ia, ae_int_t ja, ae_matrix *b, ae_int_t ib, ae_int_t jb, ae_state *_state)
void spdmatrixrndcond(ae_int_t n, double c, ae_matrix *a, ae_state *_state)
void cmatrixqr(complex_2d_array &a, const ae_int_t m, const ae_int_t n, complex_1d_array &tau)
alglib_impl::matinvreport * p_struct
Definition: linalg.h:259
void _eigsubspacereport_clear(void *_p)
void rmatrixsyrk(ae_int_t n, ae_int_t k, double alpha, ae_matrix *a, ae_int_t ia, ae_int_t ja, ae_int_t optypea, double beta, ae_matrix *c, ae_int_t ic, ae_int_t jc, ae_bool isupper, ae_state *_state)
bool smatrixevdi(const real_2d_array &a, const ae_int_t n, const ae_int_t zneeded, const bool isupper, const ae_int_t i1, const ae_int_t i2, real_1d_array &w, real_2d_array &z)
void cmatrixqrunpackr(const complex_2d_array &a, const ae_int_t m, const ae_int_t n, complex_2d_array &r)
bool sparserewriteexisting(const sparsematrix &s, const ae_int_t i, const ae_int_t j, const double v)
ae_bool sparseenumerate(sparsematrix *s, ae_int_t *t0, ae_int_t *t1, ae_int_t *i, ae_int_t *j, double *v, ae_state *_state)
void normestimatorsetseed(const normestimatorstate &state, const ae_int_t seedval)
void _pexec_rmatrixqrunpackq(ae_matrix *a, ae_int_t m, ae_int_t n, ae_vector *tau, ae_int_t qcolumns, ae_matrix *q, ae_state *_state)
void spdmatrixcholeskyupdateadd1buf(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_vector *u, ae_vector *bufr, ae_state *_state)
void sparseadd(const sparsematrix &s, const ae_int_t i, const ae_int_t j, const double v)
alglib_impl::eigsubspacestate * c_ptr()
double rmatrixlurcond1(const real_2d_array &lua, const ae_int_t n)
void smp_cmatrixluinverse(complex_2d_array &a, const integer_1d_array &pivots, const ae_int_t n, ae_int_t &info, matinvreport &rep)
void rmatrixrndorthogonalfromtheleft(ae_matrix *a, ae_int_t m, ae_int_t n, ae_state *_state)
void eigsubspacesetcond(eigsubspacestate *state, double eps, ae_int_t maxits, ae_state *_state)
void _eigsubspacestate_clear(void *_p)
double beta(double a, double b, ae_state *_state)
void sparseset(sparsematrix *s, ae_int_t i, ae_int_t j, double v, ae_state *_state)
ae_bool smatrixevdi(ae_matrix *a, ae_int_t n, ae_int_t zneeded, ae_bool isupper, ae_int_t i1, ae_int_t i2, ae_vector *w, ae_matrix *z, ae_state *_state)
void cmatrixtrinverse(complex_2d_array &a, const ae_int_t n, const bool isupper, const bool isunit, ae_int_t &info, matinvreport &rep)
void _pexec_spdmatrixinverse(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_int_t *info, matinvreport *rep, ae_state *_state)
void spdmatrixinverse(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_int_t *info, matinvreport *rep, ae_state *_state)
void eigsubspaceoocgetrequestdata(const eigsubspacestate &state, real_2d_array &x)
double rmatrixrcondinf(const real_2d_array &a, const ae_int_t n)
void rmatrixbdunpackq(const real_2d_array &qp, const ae_int_t m, const ae_int_t n, const real_1d_array &tauq, const ae_int_t qcolumns, real_2d_array &q)
void cmatrixlefttrsm(const ae_int_t m, const ae_int_t n, const complex_2d_array &a, const ae_int_t i1, const ae_int_t j1, const bool isupper, const bool isunit, const ae_int_t optype, const complex_2d_array &x, const ae_int_t i2, const ae_int_t j2)
void rmatrixgemm(const ae_int_t m, const ae_int_t n, const ae_int_t k, const double alpha, const real_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const real_2d_array &b, const ae_int_t ib, const ae_int_t jb, const ae_int_t optypeb, const double beta, const real_2d_array &c, const ae_int_t ic, const ae_int_t jc)
double spdmatrixrcond(const real_2d_array &a, const ae_int_t n, const bool isupper)
void normestimatorresults(const normestimatorstate &state, double &nrm)
void cmatrixrndorthogonalfromtheleft(ae_matrix *a, ae_int_t m, ae_int_t n, ae_state *_state)
double cmatrixrcond1(ae_matrix *a, ae_int_t n, ae_state *_state)
void _pexec_spdmatrixcholeskyinverse(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_int_t *info, matinvreport *rep, ae_state *_state)
void smp_hpdmatrixcholeskyinverse(complex_2d_array &a, const ae_int_t n, const bool isupper, ae_int_t &info, matinvreport &rep)
void smp_rmatrixrighttrsm(const ae_int_t m, const ae_int_t n, const real_2d_array &a, const ae_int_t i1, const ae_int_t j1, const bool isupper, const bool isunit, const ae_int_t optype, const real_2d_array &x, const ae_int_t i2, const ae_int_t j2)
void rmatrixqr(ae_matrix *a, ae_int_t m, ae_int_t n, ae_vector *tau, ae_state *_state)
bool smatrixgevdreduce(real_2d_array &a, const ae_int_t n, const bool isuppera, const real_2d_array &b, const bool isupperb, const ae_int_t problemtype, real_2d_array &r, bool &isupperr)
double spdmatrixrcond(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_state *_state)
void sparsecreatebuf(ae_int_t m, ae_int_t n, ae_int_t k, sparsematrix *s, ae_state *_state)
void _pexec_cmatrixgemm(ae_int_t m, ae_int_t n, ae_int_t k, ae_complex alpha, ae_matrix *a, ae_int_t ia, ae_int_t ja, ae_int_t optypea, ae_matrix *b, ae_int_t ib, ae_int_t jb, ae_int_t optypeb, ae_complex beta, ae_matrix *c, ae_int_t ic, ae_int_t jc, ae_state *_state)
double cmatrixtrrcondinf(const complex_2d_array &a, const ae_int_t n, const bool isupper, const bool isunit)
void cmatrixcopy(const ae_int_t m, const ae_int_t n, const complex_2d_array &a, const ae_int_t ia, const ae_int_t ja, complex_2d_array &b, const ae_int_t ib, const ae_int_t jb)
bool eigsubspaceooccontinue(const eigsubspacestate &state)
void spdmatrixcholeskyinverse(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_int_t *info, matinvreport *rep, ae_state *_state)
void cmatrixrndcond(const ae_int_t n, const double c, complex_2d_array &a)
void sparseset(const sparsematrix &s, const ae_int_t i, const ae_int_t j, const double v)
void sparsemm(sparsematrix *s, ae_matrix *a, ae_int_t k, ae_matrix *b, ae_state *_state)
ae_bool hmatrixevdr(ae_matrix *a, ae_int_t n, ae_int_t zneeded, ae_bool isupper, double b1, double b2, ae_int_t *m, ae_vector *w, ae_matrix *z, ae_state *_state)
ae_bool smatrixtdevd(ae_vector *d, ae_vector *e, ae_int_t n, ae_int_t zneeded, ae_matrix *z, ae_state *_state)
void hpdmatrixcholeskyinverse(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_int_t *info, matinvreport *rep, ae_state *_state)
void sparsecreate(ae_int_t m, ae_int_t n, ae_int_t k, sparsematrix *s, ae_state *_state)
double sparsegetdiagonal(const sparsematrix &s, const ae_int_t i)
void _pexec_rmatrixqr(ae_matrix *a, ae_int_t m, ae_int_t n, ae_vector *tau, ae_state *_state)
void _normestimatorstate_destroy(void *_p)
void rmatrixinvupdatecolumn(ae_matrix *inva, ae_int_t n, ae_int_t updcolumn, ae_vector *u, ae_state *_state)
void sparseconvertto(sparsematrix *s0, ae_int_t fmt, ae_state *_state)
void fblscgcreate(ae_vector *x, ae_vector *b, ae_int_t n, fblslincgstate *state, ae_state *_state)
void _pexec_cmatrixqr(ae_matrix *a, ae_int_t m, ae_int_t n, ae_vector *tau, ae_state *_state)
void spdmatrixcholeskyupdateadd1(const real_2d_array &a, const ae_int_t n, const bool isupper, const real_1d_array &u)
void smp_rmatrixgemm(const ae_int_t m, const ae_int_t n, const ae_int_t k, const double alpha, const real_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const real_2d_array &b, const ae_int_t ib, const ae_int_t jb, const ae_int_t optypeb, const double beta, const real_2d_array &c, const ae_int_t ic, const ae_int_t jc)
void rmatrixrndorthogonalfromtheright(ae_matrix *a, ae_int_t m, ae_int_t n, ae_state *_state)
void _eigsubspacestate_init(void *_p, ae_state *_state)
void sparsecreatecrsbuf(ae_int_t m, ae_int_t n, ae_vector *ner, sparsematrix *s, ae_state *_state)
void _normestimatorstate_clear(void *_p)
void cmatrixtranspose(const ae_int_t m, const ae_int_t n, const complex_2d_array &a, const ae_int_t ia, const ae_int_t ja, complex_2d_array &b, const ae_int_t ib, const ae_int_t jb)
void sparsecopytocrs(const sparsematrix &s0, sparsematrix &s1)
void eigsubspaceoocstart(eigsubspacestate *state, ae_int_t mtype, ae_state *_state)
void smp_cmatrixqrunpackq(const complex_2d_array &a, const ae_int_t m, const ae_int_t n, const complex_1d_array &tau, const ae_int_t qcolumns, complex_2d_array &q)
bool sparseenumerate(const sparsematrix &s, ae_int_t &t0, ae_int_t &t1, ae_int_t &i, ae_int_t &j, double &v)
void cmatrixrank1(const ae_int_t m, const ae_int_t n, complex_2d_array &a, const ae_int_t ia, const ae_int_t ja, complex_1d_array &u, const ae_int_t iu, complex_1d_array &v, const ae_int_t iv)
void smp_cmatrixinverse(complex_2d_array &a, const ae_int_t n, ae_int_t &info, matinvreport &rep)
void _pexec_cmatrixlqunpackq(ae_matrix *a, ae_int_t m, ae_int_t n, ae_vector *tau, ae_int_t qrows, ae_matrix *q, ae_state *_state)
ae_bool smatrixtdevdi(ae_vector *d, ae_vector *e, ae_int_t n, ae_int_t zneeded, ae_int_t i1, ae_int_t i2, ae_matrix *z, ae_state *_state)
ae_bool hmatrixevd(ae_matrix *a, ae_int_t n, ae_int_t zneeded, ae_bool isupper, ae_vector *d, ae_matrix *z, ae_state *_state)
void sparsesmm(const sparsematrix &s, const bool isupper, const real_2d_array &a, const ae_int_t k, real_2d_array &b)
void eigsubspaceoocgetrequestinfo(eigsubspacestate *state, ae_int_t *requesttype, ae_int_t *requestsize, ae_state *_state)
void eigsubspaceoocgetrequestdata(eigsubspacestate *state, ae_matrix *x, ae_state *_state)
double cmatrixlurcond1(ae_matrix *lua, ae_int_t n, ae_state *_state)
alglib_impl::normestimatorstate * c_ptr()
void _pexec_cmatrixherk(ae_int_t n, ae_int_t k, double alpha, ae_matrix *a, ae_int_t ia, ae_int_t ja, ae_int_t optypea, double beta, ae_matrix *c, ae_int_t ic, ae_int_t jc, ae_bool isupper, ae_state *_state)
double rmatrixrcond1(ae_matrix *a, ae_int_t n, ae_state *_state)
double sparseget(sparsematrix *s, ae_int_t i, ae_int_t j, ae_state *_state)
ae_int_t sparsegetlowercount(const sparsematrix &s)
void cmatrixlup(ae_matrix *a, ae_int_t m, ae_int_t n, ae_vector *pivots, ae_state *_state)
void rmatrixinvupdateuv(ae_matrix *inva, ae_int_t n, ae_vector *u, ae_vector *v, ae_state *_state)
void rmatrixmv(ae_int_t m, ae_int_t n, ae_matrix *a, ae_int_t ia, ae_int_t ja, ae_int_t opa, ae_vector *x, ae_int_t ix, ae_vector *y, ae_int_t iy, ae_state *_state)
double cmatrixtrrcondinf(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_bool isunit, ae_state *_state)
double rmatrixtrrcondinf(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_bool isunit, ae_state *_state)
void _sparsematrix_destroy(void *_p)
bool sparseishash(const sparsematrix &s)
void rmatrixrndorthogonal(const ae_int_t n, real_2d_array &a)
void rmatrixtranspose(ae_int_t m, ae_int_t n, ae_matrix *a, ae_int_t ia, ae_int_t ja, ae_matrix *b, ae_int_t ib, ae_int_t jb, ae_state *_state)
double spdmatrixcholeskydet(ae_matrix *a, ae_int_t n, ae_state *_state)
matinvreport & operator=(const matinvreport &rhs)
ae_bool bidiagonalsvddecomposition(ae_vector *d, ae_vector *e, ae_int_t n, ae_bool isupper, ae_bool isfractionalaccuracyrequired, ae_matrix *u, ae_int_t nru, ae_matrix *c, ae_int_t ncc, ae_matrix *vt, ae_int_t ncvt, ae_state *_state)
void sparsecopy(sparsematrix *s0, sparsematrix *s1, ae_state *_state)
void eigsubspacecreatebuf(ae_int_t n, ae_int_t k, eigsubspacestate *state, ae_state *_state)
void rmatrixqrunpackq(const real_2d_array &a, const ae_int_t m, const ae_int_t n, const real_1d_array &tau, const ae_int_t qcolumns, real_2d_array &q)
void _pexec_rmatrixlqunpackq(ae_matrix *a, ae_int_t m, ae_int_t n, ae_vector *tau, ae_int_t qrows, ae_matrix *q, ae_state *_state)
void smatrixtd(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_vector *tau, ae_vector *d, ae_vector *e, ae_state *_state)
void cmatrixrank1(ae_int_t m, ae_int_t n, ae_matrix *a, ae_int_t ia, ae_int_t ja, ae_vector *u, ae_int_t iu, ae_vector *v, ae_int_t iv, ae_state *_state)
ae_int_t: typedef for ptrdiff_t
Definition: ap.h:185
ae_int_t ablascomplexblocksize(ae_matrix *a, ae_state *_state)
void smp_eigsubspacesolvedenses(const eigsubspacestate &state, const real_2d_array &a, const bool isupper, real_1d_array &w, real_2d_array &z, eigsubspacereport &rep)
normestimatorstate(const normestimatorstate &rhs)
bool rmatrixevd(const real_2d_array &a, const ae_int_t n, const ae_int_t vneeded, real_1d_array &wr, real_1d_array &wi, real_2d_array &vl, real_2d_array &vr)
ae_bool sparserewriteexisting(sparsematrix *s, ae_int_t i, ae_int_t j, double v, ae_state *_state)
ae_bool rmatrixbdsvd(ae_vector *d, ae_vector *e, ae_int_t n, ae_bool isupper, ae_bool isfractionalaccuracyrequired, ae_matrix *u, ae_int_t nru, ae_matrix *c, ae_int_t ncc, ae_matrix *vt, ae_int_t ncvt, ae_state *_state)
void eigsubspaceoocgetrequestinfo(const eigsubspacestate &state, ae_int_t &requesttype, ae_int_t &requestsize)
void sparsegetrow(const sparsematrix &s, const ae_int_t i, real_1d_array &irow)
void sparseadd(sparsematrix *s, ae_int_t i, ae_int_t j, double v, ae_state *_state)
void sparsecreatecrsinplace(sparsematrix *s, ae_state *_state)
void sparsemm2(sparsematrix *s, ae_matrix *a, ae_int_t k, ae_matrix *b0, ae_matrix *b1, ae_state *_state)
void smp_rmatrixqrunpackq(const real_2d_array &a, const ae_int_t m, const ae_int_t n, const real_1d_array &tau, const ae_int_t qcolumns, real_2d_array &q)
double rcondthreshold(ae_state *_state)
p_struct: pointer to the underlying alglib_impl::eigsubspacereport structure
Definition: linalg.h:351
bool smp_rmatrixsvd(const real_2d_array &a, const ae_int_t m, const ae_int_t n, const ae_int_t uneeded, const ae_int_t vtneeded, const ae_int_t additionalmemory, real_1d_array &w, real_2d_array &u, real_2d_array &vt)
void sparsecreatebuf(const ae_int_t m, const ae_int_t n, const ae_int_t k, const sparsematrix &s)
void sparsemv(sparsematrix *s, ae_vector *x, ae_vector *y, ae_state *_state)
double rmatrixdet(ae_matrix *a, ae_int_t n, ae_state *_state)
void smp_rmatrixinverse(real_2d_array &a, const ae_int_t n, ae_int_t &info, matinvreport &rep)
void rmatrixbdmultiplybyq(const real_2d_array &qp, const ae_int_t m, const ae_int_t n, const real_1d_array &tauq, real_2d_array &z, const ae_int_t zrows, const ae_int_t zcolumns, const bool fromtheright, const bool dotranspose)
ae_int_t ablasmicroblocksize(ae_state *_state)
void spdmatrixcholeskyupdateadd1(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_vector *u, ae_state *_state)
bool hmatrixevdr(const complex_2d_array &a, const ae_int_t n, const ae_int_t zneeded, const bool isupper, const double b1, const double b2, ae_int_t &m, real_1d_array &w, complex_2d_array &z)
ae_bool smatrixgevd(ae_matrix *a, ae_int_t n, ae_bool isuppera, ae_matrix *b, ae_bool isupperb, ae_int_t zneeded, ae_int_t problemtype, ae_vector *d, ae_matrix *z, ae_state *_state)
p_struct: pointer to the underlying alglib_impl::sparsebuffers structure
Definition: linalg.h:227
bool smatrixtdevdr(real_1d_array &d, const real_1d_array &e, const ae_int_t n, const ae_int_t zneeded, const double a, const double b, ae_int_t &m, real_2d_array &z)
double spdmatrixcholeskyrcond(const real_2d_array &a, const ae_int_t n, const bool isupper)
double spdmatrixcholeskyrcond(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_state *_state)
void rmatrixlup(ae_matrix *a, ae_int_t m, ae_int_t n, ae_vector *pivots, ae_state *_state)
void rmatrixrighttrsm(const ae_int_t m, const ae_int_t n, const real_2d_array &a, const ae_int_t i1, const ae_int_t j1, const bool isupper, const bool isunit, const ae_int_t optype, const real_2d_array &x, const ae_int_t i2, const ae_int_t j2)
void hmatrixtdunpackq(ae_matrix *a, ae_int_t n, ae_bool isupper, ae_vector *tau, ae_matrix *q, ae_state *_state)
void rmatrixbdmultiplybyp(const real_2d_array &qp, const ae_int_t m, const ae_int_t n, const real_1d_array &taup, real_2d_array &z, const ae_int_t zrows, const ae_int_t zcolumns, const bool fromtheright, const bool dotranspose)
double sparsevsmv(const sparsematrix &s, const bool isupper, const real_1d_array &x)
void smp_rmatrixlqunpackq(const real_2d_array &a, const ae_int_t m, const ae_int_t n, const real_1d_array &tau, const ae_int_t qrows, real_2d_array &q)
void eigsubspacecreate(const ae_int_t n, const ae_int_t k, eigsubspacestate &state)
void eigsubspaceoocstart(const eigsubspacestate &state, const ae_int_t mtype)
bool hmatrixevd(const complex_2d_array &a, const ae_int_t n, const ae_int_t zneeded, const bool isupper, real_1d_array &d, complex_2d_array &z)
double sparsevsmv(sparsematrix *s, ae_bool isupper, ae_vector *x, ae_state *_state)
Page URL: http://wiki.math.ethz.ch/bin/view/Concepts/WebHome
21 August 2020
© 2020 Eidgenössische Technische Hochschule Zürich