dataanalysis.h

/*************************************************************************
ALGLIB 3.11.0 (source code generated 2017-05-11)
Copyright (c) Sergey Bochkanov (ALGLIB project).

>>> SOURCE LICENSE >>>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation (www.fsf.org); either version 2 of the
License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

A copy of the GNU General Public License is available at
http://www.fsf.org/licensing/licenses
>>> END OF LICENSE >>>
*************************************************************************/
#ifndef _dataanalysis_pkg_h
#define _dataanalysis_pkg_h
#include "ap.h"
#include "alglibinternal.h"
#include "linalg.h"
#include "alglibmisc.h"
#include "statistics.h"
#include "specialfunctions.h"
#include "solvers.h"
#include "optimization.h"

//
// THIS SECTION CONTAINS COMPUTATIONAL CORE DECLARATIONS (DATATYPES)
//
namespace alglib_impl
{
typedef struct
{
    double relclserror;
    double avgce;
    double rmserror;
    double avgerror;
    double avgrelerror;
} cvreport;
typedef struct
{
    double relclserror;
    double avgce;
    double rmserror;
    double avgerror;
    double avgrelerror;
} modelerrors;
typedef struct
{
    double f;
    ae_vector g;
} smlpgrad;
typedef struct
{
    ae_int_t hlnetworktype;
    ae_int_t hlnormtype;
    ae_vector hllayersizes;
    ae_vector hlconnections;
    ae_vector hlneurons;
    ae_vector structinfo;
    ae_vector weights;
    ae_vector columnmeans;
    ae_vector columnsigmas;
    ae_vector neurons;
    ae_vector dfdnet;
    ae_vector derror;
    ae_vector x;
    ae_vector y;
    ae_matrix xy;
    ae_vector xyrow;
    ae_vector nwbuf;
    ae_vector integerbuf;
    modelerrors err;
    ae_vector rndbuf;
    ae_shared_pool buf;
    ae_shared_pool gradbuf;
    ae_matrix dummydxy;
    sparsematrix dummysxy;
    ae_vector dummyidx;
    ae_shared_pool dummypool;
} multilayerperceptron;
typedef struct
{
    ae_vector w;
} linearmodel;
typedef struct
{
    ae_matrix c;
    double rmserror;
    double avgerror;
    double avgrelerror;
    double cvrmserror;
    double cvavgerror;
    double cvavgrelerror;
    ae_int_t ncvdefects;
    ae_vector cvdefects;
} lrreport;
typedef struct
{
    ae_vector w;
} logitmodel;
typedef struct
{
    ae_bool brackt;
    ae_bool stage1;
    ae_int_t infoc;
    double dg;
    double dgm;
    double dginit;
    double dgtest;
    double dgx;
    double dgxm;
    double dgy;
    double dgym;
    double finit;
    double ftest1;
    double fm;
    double fx;
    double fxm;
    double fy;
    double fym;
    double stx;
    double sty;
    double stmin;
    double stmax;
    double width;
    double width1;
    double xtrapf;
} logitmcstate;
typedef struct
{
    ae_int_t ngrad;
    ae_int_t nhess;
} mnlreport;
typedef struct
{
    ae_int_t n;
    ae_vector states;
    ae_int_t npairs;
    ae_matrix data;
    ae_matrix ec;
    ae_matrix bndl;
    ae_matrix bndu;
    ae_matrix c;
    ae_vector ct;
    ae_int_t ccnt;
    ae_vector pw;
    ae_matrix priorp;
    double regterm;
    minbleicstate bs;
    ae_int_t repinneriterationscount;
    ae_int_t repouteriterationscount;
    ae_int_t repnfev;
    ae_int_t repterminationtype;
    minbleicreport br;
    ae_vector tmpp;
    ae_vector effectivew;
    ae_vector effectivebndl;
    ae_vector effectivebndu;
    ae_matrix effectivec;
    ae_vector effectivect;
    ae_vector h;
    ae_matrix p;
} mcpdstate;
typedef struct
{
    ae_int_t inneriterationscount;
    ae_int_t outeriterationscount;
    ae_int_t nfev;
    ae_int_t terminationtype;
} mcpdreport;
typedef struct
{
    ae_int_t ensemblesize;
    ae_vector weights;
    ae_vector columnmeans;
    ae_vector columnsigmas;
    multilayerperceptron network;
    ae_vector y;
} mlpensemble;
typedef struct
{
    double relclserror;
    double avgce;
    double rmserror;
    double avgerror;
    double avgrelerror;
    ae_int_t ngrad;
    ae_int_t nhess;
    ae_int_t ncholesky;
} mlpreport;
typedef struct
{
    double relclserror;
    double avgce;
    double rmserror;
    double avgerror;
    double avgrelerror;
} mlpcvreport;
typedef struct
{
    ae_vector bestparameters;
    double bestrmserror;
    ae_bool randomizenetwork;
    multilayerperceptron network;
    minlbfgsstate optimizer;
    minlbfgsreport optimizerrep;
    ae_vector wbuf0;
    ae_vector wbuf1;
    ae_vector allminibatches;
    ae_vector currentminibatch;
    rcommstate rstate;
    ae_int_t algoused;
    ae_int_t minibatchsize;
    hqrndstate generator;
} smlptrnsession;
typedef struct
{
    ae_vector trnsubset;
    ae_vector valsubset;
    ae_shared_pool mlpsessions;
    mlpreport mlprep;
    multilayerperceptron network;
} mlpetrnsession;
typedef struct
{
    ae_int_t nin;
    ae_int_t nout;
    ae_bool rcpar;
    ae_int_t lbfgsfactor;
    double decay;
    double wstep;
    ae_int_t maxits;
    ae_int_t datatype;
    ae_int_t npoints;
    ae_matrix densexy;
    sparsematrix sparsexy;
    smlptrnsession session;
    ae_int_t ngradbatch;
    ae_vector subset;
    ae_int_t subsetsize;
    ae_vector valsubset;
    ae_int_t valsubsetsize;
    ae_int_t algokind;
    ae_int_t minibatchsize;
} mlptrainer;
typedef struct
{
    multilayerperceptron network;
    mlpreport rep;
    ae_vector subset;
    ae_int_t subsetsize;
    ae_vector xyrow;
    ae_vector y;
    ae_int_t ngrad;
    ae_shared_pool trnpool;
} mlpparallelizationcv;
typedef struct
{
    ae_matrix ct;
    ae_matrix ctbest;
    ae_vector xycbest;
    ae_vector xycprev;
    ae_vector d2;
    ae_vector csizes;
    apbuffers initbuf;
    ae_shared_pool updatepool;
} kmeansbuffers;
typedef struct
{
    ae_int_t npoints;
    ae_int_t nfeatures;
    ae_int_t disttype;
    ae_matrix xy;
    ae_matrix d;
    ae_int_t ahcalgo;
    ae_int_t kmeansrestarts;
    ae_int_t kmeansmaxits;
    ae_int_t kmeansinitalgo;
    ae_bool kmeansdbgnoits;
    ae_matrix tmpd;
    apbuffers distbuf;
    kmeansbuffers kmeanstmp;
} clusterizerstate;
typedef struct
{
    ae_int_t terminationtype;
    ae_int_t npoints;
    ae_vector p;
    ae_matrix z;
    ae_matrix pz;
    ae_matrix pm;
    ae_vector mergedist;
} ahcreport;
typedef struct
{
    ae_int_t npoints;
    ae_int_t nfeatures;
    ae_int_t terminationtype;
    ae_int_t iterationscount;
    double energy;
    ae_int_t k;
    ae_matrix c;
    ae_vector cidx;
} kmeansreport;
typedef struct
{
    ae_int_t nvars;
    ae_int_t nclasses;
    ae_int_t ntrees;
    ae_int_t bufsize;
    ae_vector trees;
} decisionforest;
typedef struct
{
    double relclserror;
    double avgce;
    double rmserror;
    double avgerror;
    double avgrelerror;
    double oobrelclserror;
    double oobavgce;
    double oobrmserror;
    double oobavgerror;
    double oobavgrelerror;
} dfreport;
typedef struct
{
    ae_vector treebuf;
    ae_vector idxbuf;
    ae_vector tmpbufr;
    ae_vector tmpbufr2;
    ae_vector tmpbufi;
    ae_vector classibuf;
    ae_vector sortrbuf;
    ae_vector sortrbuf2;
    ae_vector sortibuf;
    ae_vector varpool;
    ae_vector evsbin;
    ae_vector evssplits;
} dfinternalbuffers;

}

//
// THIS SECTION CONTAINS C++ INTERFACE
//
namespace alglib
{

/*************************************************************************
Model's errors:
    * RelCLSError  -  fraction of misclassified cases.
    * AvgCE        -  average cross-entropy
    * RMSError     -  root-mean-square error
    * AvgError     -  average error
    * AvgRelError  -  average relative error

NOTE 1: RelCLSError/AvgCE are zero on regression problems.

NOTE 2: on classification problems RMSError/AvgError/AvgRelError contain
        errors in prediction of posterior probabilities
*************************************************************************/
class _modelerrors_owner
{
public:
    _modelerrors_owner();
    _modelerrors_owner(const _modelerrors_owner &rhs);
    _modelerrors_owner& operator=(const _modelerrors_owner &rhs);
    virtual ~_modelerrors_owner();
    alglib_impl::modelerrors* c_ptr();
    alglib_impl::modelerrors* c_ptr() const;
protected:
    alglib_impl::modelerrors *p_struct;
};
class modelerrors : public _modelerrors_owner
{
public:
    modelerrors();
    modelerrors(const modelerrors &rhs);
    modelerrors& operator=(const modelerrors &rhs);
    virtual ~modelerrors();
    double &relclserror;
    double &avgce;
    double &rmserror;
    double &avgerror;
    double &avgrelerror;
};

/*************************************************************************

*************************************************************************/
class _multilayerperceptron_owner
{
public:
    _multilayerperceptron_owner();
    _multilayerperceptron_owner(const _multilayerperceptron_owner &rhs);
    _multilayerperceptron_owner& operator=(const _multilayerperceptron_owner &rhs);
    virtual ~_multilayerperceptron_owner();
    alglib_impl::multilayerperceptron* c_ptr();
    alglib_impl::multilayerperceptron* c_ptr() const;
protected:
    alglib_impl::multilayerperceptron *p_struct;
};
class multilayerperceptron : public _multilayerperceptron_owner
{
public:
    multilayerperceptron();
    multilayerperceptron(const multilayerperceptron &rhs);
    multilayerperceptron& operator=(const multilayerperceptron &rhs);
    virtual ~multilayerperceptron();
};

/*************************************************************************

*************************************************************************/
class _linearmodel_owner
{
public:
    _linearmodel_owner();
    _linearmodel_owner(const _linearmodel_owner &rhs);
    _linearmodel_owner& operator=(const _linearmodel_owner &rhs);
    virtual ~_linearmodel_owner();
    alglib_impl::linearmodel* c_ptr();
    alglib_impl::linearmodel* c_ptr() const;
protected:
    alglib_impl::linearmodel *p_struct;
};
class linearmodel : public _linearmodel_owner
{
public:
    linearmodel();
    linearmodel(const linearmodel &rhs);
    linearmodel& operator=(const linearmodel &rhs);
    virtual ~linearmodel();
};

/*************************************************************************
LRReport structure contains additional information about linear model:
* C             -   covariance matrix, array[0..NVars,0..NVars].
                    C[i,j] = Cov(A[i],A[j])
* RMSError      -   root mean square error on a training set
* AvgError      -   average error on a training set
* AvgRelError   -   average relative error on a training set (excluding
                    observations with zero function value).
* CVRMSError    -   leave-one-out cross-validation estimate of
                    generalization error. Calculated using fast algorithm
                    with O(NVars*NPoints) complexity.
* CVAvgError    -   cross-validation estimate of average error
* CVAvgRelError -   cross-validation estimate of average relative error

All other fields of the structure are intended for internal use and should
not be used outside ALGLIB.
*************************************************************************/
class _lrreport_owner
{
public:
    _lrreport_owner();
    _lrreport_owner(const _lrreport_owner &rhs);
    _lrreport_owner& operator=(const _lrreport_owner &rhs);
    virtual ~_lrreport_owner();
    alglib_impl::lrreport* c_ptr();
    alglib_impl::lrreport* c_ptr() const;
protected:
    alglib_impl::lrreport *p_struct;
};
class lrreport : public _lrreport_owner
{
public:
    lrreport();
    lrreport(const lrreport &rhs);
    lrreport& operator=(const lrreport &rhs);
    virtual ~lrreport();
    real_2d_array c;
    double &rmserror;
    double &avgerror;
    double &avgrelerror;
    double &cvrmserror;
    double &cvavgerror;
    double &cvavgrelerror;
    ae_int_t &ncvdefects;
    integer_1d_array cvdefects;
};

/*************************************************************************

*************************************************************************/
class _logitmodel_owner
{
public:
    _logitmodel_owner();
    _logitmodel_owner(const _logitmodel_owner &rhs);
    _logitmodel_owner& operator=(const _logitmodel_owner &rhs);
    virtual ~_logitmodel_owner();
    alglib_impl::logitmodel* c_ptr();
    alglib_impl::logitmodel* c_ptr() const;
protected:
    alglib_impl::logitmodel *p_struct;
};
class logitmodel : public _logitmodel_owner
{
public:
    logitmodel();
    logitmodel(const logitmodel &rhs);
    logitmodel& operator=(const logitmodel &rhs);
    virtual ~logitmodel();
};

/*************************************************************************
MNLReport structure contains information about training process:
* NGrad     -   number of gradient calculations
* NHess     -   number of Hessian calculations
*************************************************************************/
class _mnlreport_owner
{
public:
    _mnlreport_owner();
    _mnlreport_owner(const _mnlreport_owner &rhs);
    _mnlreport_owner& operator=(const _mnlreport_owner &rhs);
    virtual ~_mnlreport_owner();
    alglib_impl::mnlreport* c_ptr();
    alglib_impl::mnlreport* c_ptr() const;
protected:
    alglib_impl::mnlreport *p_struct;
};
class mnlreport : public _mnlreport_owner
{
public:
    mnlreport();
    mnlreport(const mnlreport &rhs);
    mnlreport& operator=(const mnlreport &rhs);
    virtual ~mnlreport();
    ae_int_t &ngrad;
    ae_int_t &nhess;
};

/*************************************************************************
This structure is a MCPD (Markov Chains for Population Data) solver.

You should use ALGLIB functions in order to work with this object.

  -- ALGLIB --
     Copyright 23.05.2010 by Bochkanov Sergey
*************************************************************************/
class _mcpdstate_owner
{
public:
    _mcpdstate_owner();
    _mcpdstate_owner(const _mcpdstate_owner &rhs);
    _mcpdstate_owner& operator=(const _mcpdstate_owner &rhs);
    virtual ~_mcpdstate_owner();
    alglib_impl::mcpdstate* c_ptr();
    alglib_impl::mcpdstate* c_ptr() const;
protected:
    alglib_impl::mcpdstate *p_struct;
};
class mcpdstate : public _mcpdstate_owner
{
public:
    mcpdstate();
    mcpdstate(const mcpdstate &rhs);
    mcpdstate& operator=(const mcpdstate &rhs);
    virtual ~mcpdstate();
};

/*************************************************************************
This structure is a MCPD training report:
    InnerIterationsCount  -  number of inner iterations of the
                             underlying optimization algorithm
    OuterIterationsCount  -  number of outer iterations of the
                             underlying optimization algorithm
    NFEV                  -  number of merit function evaluations
    TerminationType       -  termination type
                             (same as for MinBLEIC optimizer, positive
                             values denote success, negative ones -
                             failure)

  -- ALGLIB --
     Copyright 23.05.2010 by Bochkanov Sergey
*************************************************************************/
class _mcpdreport_owner
{
public:
    _mcpdreport_owner();
    _mcpdreport_owner(const _mcpdreport_owner &rhs);
    _mcpdreport_owner& operator=(const _mcpdreport_owner &rhs);
    virtual ~_mcpdreport_owner();
    alglib_impl::mcpdreport* c_ptr();
    alglib_impl::mcpdreport* c_ptr() const;
protected:
    alglib_impl::mcpdreport *p_struct;
};
class mcpdreport : public _mcpdreport_owner
{
public:
    mcpdreport();
    mcpdreport(const mcpdreport &rhs);
    mcpdreport& operator=(const mcpdreport &rhs);
    virtual ~mcpdreport();
    ae_int_t &inneriterationscount;
    ae_int_t &outeriterationscount;
    ae_int_t &nfev;
    ae_int_t &terminationtype;
};

/*************************************************************************
Neural networks ensemble
*************************************************************************/
class _mlpensemble_owner
{
public:
    _mlpensemble_owner();
    _mlpensemble_owner(const _mlpensemble_owner &rhs);
    _mlpensemble_owner& operator=(const _mlpensemble_owner &rhs);
    virtual ~_mlpensemble_owner();
    alglib_impl::mlpensemble* c_ptr();
    alglib_impl::mlpensemble* c_ptr() const;
protected:
    alglib_impl::mlpensemble *p_struct;
};
class mlpensemble : public _mlpensemble_owner
{
public:
    mlpensemble();
    mlpensemble(const mlpensemble &rhs);
    mlpensemble& operator=(const mlpensemble &rhs);
    virtual ~mlpensemble();
};

/*************************************************************************
Training report:
    * RelCLSError  -  fraction of misclassified cases.
    * AvgCE        -  average cross-entropy
    * RMSError     -  root-mean-square error
    * AvgError     -  average error
    * AvgRelError  -  average relative error
    * NGrad        -  number of gradient calculations
    * NHess        -  number of Hessian calculations
    * NCholesky    -  number of Cholesky decompositions

NOTE 1: RelCLSError/AvgCE are zero on regression problems.

NOTE 2: on classification problems RMSError/AvgError/AvgRelError contain
        errors in prediction of posterior probabilities
*************************************************************************/
class _mlpreport_owner
{
public:
    _mlpreport_owner();
    _mlpreport_owner(const _mlpreport_owner &rhs);
    _mlpreport_owner& operator=(const _mlpreport_owner &rhs);
    virtual ~_mlpreport_owner();
    alglib_impl::mlpreport* c_ptr();
    alglib_impl::mlpreport* c_ptr() const;
protected:
    alglib_impl::mlpreport *p_struct;
};
class mlpreport : public _mlpreport_owner
{
public:
    mlpreport();
    mlpreport(const mlpreport &rhs);
    mlpreport& operator=(const mlpreport &rhs);
    virtual ~mlpreport();
    double &relclserror;
    double &avgce;
    double &rmserror;
    double &avgerror;
    double &avgrelerror;
    ae_int_t &ngrad;
    ae_int_t &nhess;
    ae_int_t &ncholesky;
};

/*************************************************************************
Cross-validation estimates of generalization error
*************************************************************************/
class _mlpcvreport_owner
{
public:
    _mlpcvreport_owner();
    _mlpcvreport_owner(const _mlpcvreport_owner &rhs);
    _mlpcvreport_owner& operator=(const _mlpcvreport_owner &rhs);
    virtual ~_mlpcvreport_owner();
    alglib_impl::mlpcvreport* c_ptr();
    alglib_impl::mlpcvreport* c_ptr() const;
protected:
    alglib_impl::mlpcvreport *p_struct;
};
class mlpcvreport : public _mlpcvreport_owner
{
public:
    mlpcvreport();
    mlpcvreport(const mlpcvreport &rhs);
    mlpcvreport& operator=(const mlpcvreport &rhs);
    virtual ~mlpcvreport();
    double &relclserror;
    double &avgce;
    double &rmserror;
    double &avgerror;
    double &avgrelerror;
};

/*************************************************************************
Trainer object for neural network.

You should not try to access fields of this object directly - use ALGLIB
functions to work with this object.
*************************************************************************/
class _mlptrainer_owner
{
public:
    _mlptrainer_owner();
    _mlptrainer_owner(const _mlptrainer_owner &rhs);
    _mlptrainer_owner& operator=(const _mlptrainer_owner &rhs);
    virtual ~_mlptrainer_owner();
    alglib_impl::mlptrainer* c_ptr();
    alglib_impl::mlptrainer* c_ptr() const;
protected:
    alglib_impl::mlptrainer *p_struct;
};
class mlptrainer : public _mlptrainer_owner
{
public:
    mlptrainer();
    mlptrainer(const mlptrainer &rhs);
    mlptrainer& operator=(const mlptrainer &rhs);
    virtual ~mlptrainer();
};

/*************************************************************************
This structure is a clusterization engine.

You should not try to access its fields directly.
Use ALGLIB functions in order to work with this object.

  -- ALGLIB --
     Copyright 10.07.2012 by Bochkanov Sergey
*************************************************************************/
class _clusterizerstate_owner
{
public:
    _clusterizerstate_owner();
    _clusterizerstate_owner(const _clusterizerstate_owner &rhs);
    _clusterizerstate_owner& operator=(const _clusterizerstate_owner &rhs);
    virtual ~_clusterizerstate_owner();
    alglib_impl::clusterizerstate* c_ptr();
    alglib_impl::clusterizerstate* c_ptr() const;
protected:
    alglib_impl::clusterizerstate *p_struct;
};
class clusterizerstate : public _clusterizerstate_owner
{
public:
    clusterizerstate();
    clusterizerstate(const clusterizerstate &rhs);
    clusterizerstate& operator=(const clusterizerstate &rhs);
    virtual ~clusterizerstate();
};

/*************************************************************************
This structure is used to store results of the agglomerative hierarchical
clustering (AHC).

Following information is returned:

* TerminationType - completion code:
  * 1   for successful completion of algorithm
  * -5  inappropriate combination of clustering algorithm and distance
        function was used. As for now, it is possible only when Ward's
        method is called for dataset with non-Euclidean distance function.
  In case negative completion code is returned, other fields of report
  structure are invalid and should not be used.

* NPoints contains number of points in the original dataset

* Z contains information about merges performed (see below). Z contains
  indexes from the original (unsorted) dataset and it can be used when you
  need to know what points were merged. However, it is not convenient when
  you want to build a dendrogram (see below).

* if you want to build a dendrogram, you can use Z, but it is not a good
  option, because Z contains indexes from the unsorted dataset. A dendrogram
  built from such a dataset is likely to have intersections. So, you have to
  reorder your points before building the dendrogram.
  Permutation which reorders points is returned in P. Another representation
  of merges, which is more convenient for dendrogram construction, is
  returned in PM.

* more information on format of Z, P and PM can be found below and in the
  examples from ALGLIB Reference Manual.

FORMAL DESCRIPTION OF FIELDS:
    NPoints         number of points
    Z               array[NPoints-1,2], contains indexes of clusters
                    linked in pairs to form clustering tree. I-th row
                    corresponds to I-th merge:
                    * Z[I,0] - index of the first cluster to merge
                    * Z[I,1] - index of the second cluster to merge
                    * Z[I,0]<Z[I,1]
                    * clusters are numbered from 0 to 2*NPoints-2, with
                      indexes from 0 to NPoints-1 corresponding to points
                      of the original dataset, and indexes from NPoints to
                      2*NPoints-2 correspond to clusters generated by
                      subsequent merges (I-th row of Z creates cluster
                      with index NPoints+I).

                    IMPORTANT: indexes in Z[] are indexes in the ORIGINAL,
                    unsorted dataset. In addition to Z, the algorithm outputs
                    a permutation which rearranges points in such way that
                    subsequent merges are performed on adjacent points
                    (such order is needed if you want to build a dendrogram).
                    However, indexes in Z are related to the original,
                    unrearranged sequence of points.

    P               array[NPoints], permutation which reorders points for
                    dendrogram construction. P[i] contains index of the
                    position where we should move I-th point of the
                    original dataset in order to apply merges PZ/PM.

    PZ              same as Z, but for permutation of points given by P.
                    The only thing which changed are indexes of the
                    original points; indexes of clusters remained same.

    MergeDist       array[NPoints-1], contains distances between clusters
                    being merged (MergeDist[i] corresponds to merge stored
                    in Z[i,...]):
                    * CLINK, SLINK and average linkage algorithms report
                      "raw", unmodified distance metric.
                    * Ward's method reports weighted intra-cluster
                      variance, which is equal to ||Ca-Cb||^2 * Sa*Sb/(Sa+Sb).
                      Here A and B are clusters being merged, Ca is a
                      center of A, Cb is a center of B, Sa is a size of A,
                      Sb is a size of B.

    PM              array[NPoints-1,6], another representation of merges,
                    which is suited for dendrogram construction. It deals
                    with rearranged points (permutation P is applied) and
                    represents merges in a form which is different from the
                    one used by Z.
                    For each I from 0 to NPoints-2, I-th row of PM represents
                    merge performed on two clusters C0 and C1. Here:
                    * C0 contains points with indexes PM[I,0]...PM[I,1]
                    * C1 contains points with indexes PM[I,2]...PM[I,3]
                    * indexes stored in PM are given for dataset sorted
                      according to permutation P
                    * PM[I,1]=PM[I,2]-1 (only adjacent clusters are merged)
                    * PM[I,0]<=PM[I,1], PM[I,2]<=PM[I,3], i.e. both
                      clusters contain at least one point
                    * heights of "subdendrograms" corresponding to C0/C1
                      are stored in PM[I,4] and PM[I,5]. Subdendrograms
                      corresponding to single-point clusters have
                      height=0. Dendrogram of the merge result has height
                      H=max(H0,H1)+1.

NOTE: there is one-to-one correspondence between merges described by Z and
      PM. I-th row of Z describes same merge of clusters as I-th row of PM,
      with "left" cluster from Z corresponding to the "left" one from PM.

  -- ALGLIB --
     Copyright 10.07.2012 by Bochkanov Sergey
*************************************************************************/
class _ahcreport_owner
{
public:
    _ahcreport_owner();
    _ahcreport_owner(const _ahcreport_owner &rhs);
    _ahcreport_owner& operator=(const _ahcreport_owner &rhs);
    virtual ~_ahcreport_owner();
    alglib_impl::ahcreport* c_ptr();
    alglib_impl::ahcreport* c_ptr() const;
protected:
    alglib_impl::ahcreport *p_struct;
};
class ahcreport : public _ahcreport_owner
{
public:
    ahcreport();
    ahcreport(const ahcreport &rhs);
    ahcreport& operator=(const ahcreport &rhs);
    virtual ~ahcreport();
    ae_int_t &terminationtype;
    ae_int_t &npoints;
    integer_1d_array p;
    integer_2d_array z;
    integer_2d_array pz;
    integer_2d_array pm;
    real_1d_array mergedist;
};

/*************************************************************************
This structure is used to store results of the k-means clustering
algorithm.

Following information is always returned:
* NPoints contains number of points in the original dataset
* TerminationType contains completion code, negative on failure, positive
  on success
* K contains number of clusters

For positive TerminationType we return:
* NFeatures contains number of variables in the original dataset
* C, which contains centers found by algorithm
* CIdx, which maps points of the original dataset to clusters

FORMAL DESCRIPTION OF FIELDS:
    NPoints         number of points, >=0
    NFeatures       number of variables, >=1
    TerminationType completion code:
                    * -5 if distance type is anything different from
                         Euclidean metric
                    * -3 for degenerate dataset: a) less than K distinct
                         points, b) K=0 for non-empty dataset.
                    * +1 for successful completion
    K               number of clusters
    C               array[K,NFeatures], rows of the array store centers
    CIdx            array[NPoints], which contains cluster indexes
    IterationsCount actual number of iterations performed by clusterizer.
                    If algorithm performed more than one random restart,
                    total number of iterations is returned.
    Energy          merit function, "energy", sum of squared deviations
                    from cluster centers

  -- ALGLIB --
     Copyright 27.11.2012 by Bochkanov Sergey
*************************************************************************/
class _kmeansreport_owner
{
public:
    _kmeansreport_owner();
    _kmeansreport_owner(const _kmeansreport_owner &rhs);
    _kmeansreport_owner& operator=(const _kmeansreport_owner &rhs);
    virtual ~_kmeansreport_owner();
    alglib_impl::kmeansreport* c_ptr();
    alglib_impl::kmeansreport* c_ptr() const;
protected:
    alglib_impl::kmeansreport *p_struct;
};
class kmeansreport : public _kmeansreport_owner
{
public:
    kmeansreport();
    kmeansreport(const kmeansreport &rhs);
    kmeansreport& operator=(const kmeansreport &rhs);
    virtual ~kmeansreport();
    ae_int_t &npoints;
    ae_int_t &nfeatures;
    ae_int_t &terminationtype;
    ae_int_t &iterationscount;
    double &energy;
    ae_int_t &k;
    real_2d_array c;
    integer_1d_array cidx;
};

/*************************************************************************

*************************************************************************/
class _decisionforest_owner
{
public:
    _decisionforest_owner();
    _decisionforest_owner(const _decisionforest_owner &rhs);
    _decisionforest_owner& operator=(const _decisionforest_owner &rhs);
    virtual ~_decisionforest_owner();
    alglib_impl::decisionforest* c_ptr();
    alglib_impl::decisionforest* c_ptr() const;
protected:
    alglib_impl::decisionforest *p_struct;
};
class decisionforest : public _decisionforest_owner
{
public:
    decisionforest();
    decisionforest(const decisionforest &rhs);
    decisionforest& operator=(const decisionforest &rhs);
    virtual ~decisionforest();
};

/*************************************************************************

*************************************************************************/
class _dfreport_owner
{
public:
    _dfreport_owner();
    _dfreport_owner(const _dfreport_owner &rhs);
    _dfreport_owner& operator=(const _dfreport_owner &rhs);
    virtual ~_dfreport_owner();
    alglib_impl::dfreport* c_ptr();
    alglib_impl::dfreport* c_ptr() const;
protected:
    alglib_impl::dfreport *p_struct;
};
class dfreport : public _dfreport_owner
{
public:
    dfreport();
    dfreport(const dfreport &rhs);
    dfreport& operator=(const dfreport &rhs);
    virtual ~dfreport();
    double &relclserror;
    double &avgce;
    double &rmserror;
    double &avgerror;
    double &avgrelerror;
    double &oobrelclserror;
    double &oobavgce;
    double &oobrmserror;
    double &oobavgerror;
    double &oobavgrelerror;
};

/*************************************************************************
Principal components analysis

This function builds orthogonal basis where first axis corresponds to
direction with maximum variance, second axis maximizes variance in the
subspace orthogonal to first axis and so on.

This function builds FULL basis, i.e. returns N vectors corresponding to
ALL directions, no matter how informative. If you need just a few (say,
10 or 50) of the most important directions, you may find it faster to use
one of the reduced versions:
* pcatruncatedsubspace() - for subspace iteration based method

It should be noted that, unlike LDA, PCA does not use class labels.

COMMERCIAL EDITION OF ALGLIB:

  ! Commercial version of ALGLIB includes two important improvements of
  ! this function, which can be used from C++ and C#:
  ! * multithreading support
  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
  !
  ! Multithreading typically gives sublinear (with respect to the number
  ! of cores) speedup, because only some parts of the algorithm can be
  ! parallelized.
  !
  ! Intel MKL gives approximately constant (with respect to number of
  ! worker threads) acceleration factor which depends on CPU being used,
  ! problem size and "baseline" ALGLIB edition which is used for
  ! comparison. Best results are achieved for high-dimensional problems
  ! (NVars is at least 256).
  !
  ! We recommend you to read 'Working with commercial version' section of
  ! ALGLIB Reference Manual in order to find out how to use performance-
  ! related features provided by commercial edition of ALGLIB.

INPUT PARAMETERS:
    X       - dataset, array[0..NPoints-1,0..NVars-1].
              matrix contains ONLY INDEPENDENT VARIABLES.
    NPoints - dataset size, NPoints>=0
    NVars   - number of independent variables, NVars>=1

OUTPUT PARAMETERS:
    Info    - return code:
              * -4, if SVD subroutine hasn't converged
              * -1, if wrong parameters have been passed (NPoints<0,
                    NVars<1)
              *  1, if task is solved
    S2      - array[0..NVars-1]. variance values corresponding
              to basis vectors.
    V       - array[0..NVars-1,0..NVars-1]
              matrix, whose columns store basis vectors.

  -- ALGLIB --
     Copyright 25.08.2008 by Bochkanov Sergey
*************************************************************************/
void pcabuildbasis(const real_2d_array &x, const ae_int_t npoints, const ae_int_t nvars, ae_int_t &info, real_1d_array &s2, real_2d_array &v);
void smp_pcabuildbasis(const real_2d_array &x, const ae_int_t npoints, const ae_int_t nvars, ae_int_t &info, real_1d_array &s2, real_2d_array &v);
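

/*************************************************************************
Usage sketch: builds a full PCA basis for a tiny hard-coded dataset. The
dataset values and the example_* helper name are illustrative assumptions,
not part of the ALGLIB API.
*************************************************************************/
static inline void example_pcabuildbasis()
{
    // 4 points, 2 variables: array[NPoints,NVars], independent variables only
    real_2d_array x = "[[2.5,2.4],[0.5,0.7],[2.2,2.9],[1.9,2.2]]";
    ae_int_t info;
    real_1d_array s2;   // [NVars] variances along the basis vectors
    real_2d_array v;    // [NVars,NVars] columns store the basis vectors
    pcabuildbasis(x, 4, 2, info, s2, v);
    // info>0 means success; s2[0] is the variance along the first axis
}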


/*************************************************************************
Principal components analysis

This function performs truncated PCA, i.e. returns just a few most important
directions.

Internally it uses iterative eigensolver which is very efficient when only
a minor fraction of full basis is required. Thus, if you need full basis,
it is better to use pcabuildbasis() function.

It should be noted that, unlike LDA, PCA does not use class labels.

COMMERCIAL EDITION OF ALGLIB:

  ! Commercial version of ALGLIB includes two important improvements of
  ! this function, which can be used from C++ and C#:
  ! * multithreading support
  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
  !
  ! For a situation when you need just a few eigenvectors (~1-10),
  ! multithreading typically gives sublinear (with respect to the number
  ! of cores) speedup. For larger problems it may give you nearly linear
  ! increase in performance.
  !
  ! Intel MKL gives approximately constant (with respect to number of
  ! worker threads) acceleration factor which depends on CPU being used,
  ! problem size and "baseline" ALGLIB edition which is used for
  ! comparison. Best results are achieved for high-dimensional problems
  ! (NVars is at least 256).
  !
  ! We recommend you to read 'Working with commercial version' section of
  ! ALGLIB Reference Manual in order to find out how to use performance-
  ! related features provided by commercial edition of ALGLIB.

INPUT PARAMETERS:
    X       - dataset, array[0..NPoints-1,0..NVars-1].
              matrix contains ONLY INDEPENDENT VARIABLES.
    NPoints - dataset size, NPoints>=0
    NVars   - number of independent variables, NVars>=1
    NNeeded - number of requested directions, in [1,NVars] range;
              this function is efficient only for NNeeded<<NVars.
    Eps     - desired precision of vectors returned; underlying
              solver will stop iterations as soon as absolute error
              in corresponding singular values reduces to roughly
              eps*MAX(lambda[]), with lambda[] being array of eigen
              values.
              Zero value means that algorithm performs number of
              iterations specified by maxits parameter, without
              paying attention to precision.
    MaxIts  - number of iterations performed by subspace iteration
              method. Zero value means that no limit on iteration
              count is placed (eps-based stopping condition is used).


OUTPUT PARAMETERS:
    S2      - array[NNeeded]. Variance values corresponding
              to basis vectors.
    V       - array[NVars,NNeeded]
              matrix, whose columns store basis vectors.

NOTE: passing eps=0 and maxits=0 results in small eps being selected as
stopping condition. Exact value of automatically selected eps is version-
-dependent.

  -- ALGLIB --
     Copyright 10.01.2017 by Bochkanov Sergey
*************************************************************************/
void pcatruncatedsubspace(const real_2d_array &x, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nneeded, const double eps, const ae_int_t maxits, real_1d_array &s2, real_2d_array &v);
void smp_pcatruncatedsubspace(const real_2d_array &x, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nneeded, const double eps, const ae_int_t maxits, real_1d_array &s2, real_2d_array &v);
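

/*************************************************************************
Usage sketch: extracts only the first principal direction with the
truncated solver. Dataset values and the example_* helper name are
illustrative assumptions, not part of the ALGLIB API.
*************************************************************************/
static inline void example_pcatruncatedsubspace()
{
    real_2d_array x = "[[2.5,2.4],[0.5,0.7],[2.2,2.9],[1.9,2.2]]";
    real_1d_array s2;   // [NNeeded] variances
    real_2d_array v;    // [NVars,NNeeded] basis vectors stored in columns
    // eps=0 and maxits=0: let the solver pick its default stopping criterion
    pcatruncatedsubspace(x, 4, 2, 1, 0.0, 0, s2, v);
}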

/*************************************************************************
Optimal binary classification

The algorithm finds the optimal (=with minimal cross-entropy) binary
partition. Internal subroutine.

INPUT PARAMETERS:
    A       - array[0..N-1], variable
    C       - array[0..N-1], class numbers (0 or 1).
    N       - array size

OUTPUT PARAMETERS:
    Info    - completion code:
              * -3, all values of A[] are the same (partition is impossible)
              * -2, one of C[] is incorrect (<0, >1)
              * -1, incorrect parameters were passed (N<=0).
              *  1, OK
    Threshold - partition boundary. Left part contains values which are
              strictly less than Threshold. Right part contains values
              which are greater than or equal to Threshold.
    PAL, PBL - probabilities P(0|v<Threshold) and P(1|v<Threshold)
    PAR, PBR - probabilities P(0|v>=Threshold) and P(1|v>=Threshold)
    CVE     - cross-validation estimate of cross-entropy

  -- ALGLIB --
     Copyright 22.05.2008 by Bochkanov Sergey
*************************************************************************/
void dsoptimalsplit2(const real_1d_array &a, const integer_1d_array &c, const ae_int_t n, ae_int_t &info, double &threshold, double &pal, double &pbl, double &par, double &pbr, double &cve);
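

/*************************************************************************
Usage sketch: splits a scalar attribute into two classes with minimal
cross-entropy. Input values and the example_* helper name are illustrative
assumptions, not part of the ALGLIB API.
*************************************************************************/
static inline void example_dsoptimalsplit2()
{
    real_1d_array a = "[0.1, 0.2, 0.8, 0.9]";   // attribute values
    integer_1d_array c = "[0, 0, 1, 1]";        // class labels, 0 or 1
    ae_int_t info;
    double threshold, pal, pbl, par, pbr, cve;
    dsoptimalsplit2(a, c, 4, info, threshold, pal, pbl, par, pbr, cve);
    // info=1 on success; values >= threshold fall into the right part
}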


/*************************************************************************
Optimal partition, internal subroutine. Fast version.

Accepts:
    A       array[0..N-1]       array of attributes
    C       array[0..N-1]       array of class labels
    TiesBuf array[0..N]         temporaries (ties)
    CntBuf  array[0..2*NC-1]    temporaries (counts)
    Alpha   centering factor (0<=alpha<=1, recommended value - 0.05)
    BufR    array[0..N-1]       temporaries
    BufI    array[0..N-1]       temporaries

Output:
    Info    error code (">0"=OK, "<0"=bad)
    RMS     training set RMS error
    CVRMS   leave-one-out RMS error

Note:
    content of all arrays is changed by subroutine;
    it doesn't allocate temporaries.

  -- ALGLIB --
     Copyright 11.12.2008 by Bochkanov Sergey
*************************************************************************/
void dsoptimalsplit2fast(real_1d_array &a, integer_1d_array &c, integer_1d_array &tiesbuf, integer_1d_array &cntbuf, real_1d_array &bufr, integer_1d_array &bufi, const ae_int_t n, const ae_int_t nc, const double alpha, ae_int_t &info, double &threshold, double &rms, double &cvrms);

/*************************************************************************
This function serializes data structure to string.

Important properties of s_out:
* it contains alphanumeric characters, dots, underscores, minus signs
* these symbols are grouped into words, which are separated by spaces
  and Windows-style (CR+LF) newlines
* although serializer uses spaces and CR+LF as separators, you can
  replace any separator character by arbitrary combination of spaces,
  tabs, Windows or Unix newlines. It allows flexible reformatting of
  the string in case you want to include it into text or XML file.
  But you should not insert separators into the middle of the "words"
  nor should you change the case of letters.
* s_out can be freely moved between 32-bit and 64-bit systems, little
  and big endian machines, and so on. You can serialize structure on
  32-bit machine and unserialize it on 64-bit one (or vice versa), or
  serialize it on SPARC and unserialize on x86. You can also
  serialize it in C++ version of ALGLIB and unserialize in C# one,
  and vice versa.
*************************************************************************/
void mlpserialize(multilayerperceptron &obj, std::string &s_out);


/*************************************************************************
This function unserializes data structure from string.
*************************************************************************/
void mlpunserialize(const std::string &s_in, multilayerperceptron &obj);
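

/*************************************************************************
Usage sketch: round-trips a network through the string serializer. The
example_* helper name is illustrative, not part of the ALGLIB API; the
network passed in must be an initialized one (see the mlpcreate*()
functions declared below).
*************************************************************************/
static inline void example_mlpserialize_roundtrip(multilayerperceptron &net)
{
    std::string s;
    mlpserialize(net, s);       // portable text representation
    multilayerperceptron net2;
    mlpunserialize(s, net2);    // net2 now replicates net
}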



/*************************************************************************
This function serializes data structure to C++ stream.

Data stream generated by this function is the same as the string
representation generated by string version of serializer - alphanumeric
characters, dots, underscores, minus signs, which are grouped into words
separated by spaces and CR+LF.

We recommend you to read comments on string version of serializer to find
out more about serialization of ALGLIB objects.
*************************************************************************/
void mlpserialize(multilayerperceptron &obj, std::ostream &s_out);


/*************************************************************************
This function unserializes data structure from stream.
*************************************************************************/
void mlpunserialize(const std::istream &s_in, multilayerperceptron &obj);


/*************************************************************************
Creates neural network with NIn inputs, NOut outputs, without hidden
layers, with linear output layer. Network weights are filled with small
random values.

  -- ALGLIB --
     Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
void mlpcreate0(const ae_int_t nin, const ae_int_t nout, multilayerperceptron &network);


/*************************************************************************
Same as MLPCreate0, but with one hidden layer (NHid neurons) with
non-linear activation function. Output layer is linear.

  -- ALGLIB --
     Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
void mlpcreate1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, multilayerperceptron &network);


/*************************************************************************
Same as MLPCreate0, but with two hidden layers (NHid1 and NHid2 neurons)
with non-linear activation function. Output layer is linear.

  -- ALGLIB --
     Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
void mlpcreate2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, multilayerperceptron &network);
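

/*************************************************************************
Usage sketch: creates a small regression network with 2 inputs, one hidden
layer of 5 neurons and 1 linear output. The example_* helper name is
illustrative, not part of the ALGLIB API.
*************************************************************************/
static inline void example_mlpcreate()
{
    multilayerperceptron net;
    mlpcreate1(2, 5, 1, net);   // weights are filled with small random values
}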


/*************************************************************************
Creates neural network with NIn inputs, NOut outputs, without hidden
layers, with non-linear output layer. Network weights are filled with
small random values.

Activation function of the output layer takes values:

    (B, +INF), if D>=0

or

    (-INF, B), if D<0.


  -- ALGLIB --
     Copyright 30.03.2008 by Bochkanov Sergey
*************************************************************************/
void mlpcreateb0(const ae_int_t nin, const ae_int_t nout, const double b, const double d, multilayerperceptron &network);


/*************************************************************************
Same as MLPCreateB0 but with non-linear hidden layer.

  -- ALGLIB --
     Copyright 30.03.2008 by Bochkanov Sergey
*************************************************************************/
void mlpcreateb1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, const double b, const double d, multilayerperceptron &network);


/*************************************************************************
Same as MLPCreateB0 but with two non-linear hidden layers.

  -- ALGLIB --
     Copyright 30.03.2008 by Bochkanov Sergey
*************************************************************************/
void mlpcreateb2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, const double b, const double d, multilayerperceptron &network);


/*************************************************************************
Creates neural network with NIn inputs, NOut outputs, without hidden
layers, with non-linear output layer. Network weights are filled with
small random values. Activation function of the output layer takes values
[A,B].

  -- ALGLIB --
     Copyright 30.03.2008 by Bochkanov Sergey
*************************************************************************/
void mlpcreater0(const ae_int_t nin, const ae_int_t nout, const double a, const double b, multilayerperceptron &network);


/*************************************************************************
Same as MLPCreateR0, but with non-linear hidden layer.

  -- ALGLIB --
     Copyright 30.03.2008 by Bochkanov Sergey
*************************************************************************/
void mlpcreater1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, const double a, const double b, multilayerperceptron &network);


/*************************************************************************
Same as MLPCreateR0, but with two non-linear hidden layers.

  -- ALGLIB --
     Copyright 30.03.2008 by Bochkanov Sergey
*************************************************************************/
void mlpcreater2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, const double a, const double b, multilayerperceptron &network);


/*************************************************************************
Creates classifier network with NIn inputs and NOut possible classes.
Network contains no hidden layers and linear output layer with SOFTMAX-
normalization (so outputs sum up to 1.0 and converge to posterior
probabilities).

  -- ALGLIB --
     Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
void mlpcreatec0(const ae_int_t nin, const ae_int_t nout, multilayerperceptron &network);


/*************************************************************************
Same as MLPCreateC0, but with one non-linear hidden layer.

  -- ALGLIB --
     Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
void mlpcreatec1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, multilayerperceptron &network);


/*************************************************************************
Same as MLPCreateC0, but with two non-linear hidden layers.

  -- ALGLIB --
     Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
void mlpcreatec2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, multilayerperceptron &network);
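

/*************************************************************************
Usage sketch: creates a classifier network for 4 inputs and 3 classes with
one hidden layer of 10 neurons. The example_* helper name is illustrative,
not part of the ALGLIB API.
*************************************************************************/
static inline void example_mlpcreatec()
{
    // outputs of this network are SOFTMAX-normalized and sum to 1.0
    multilayerperceptron net;
    mlpcreatec1(4, 10, 3, net);
}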


/*************************************************************************
Copying of neural network

INPUT PARAMETERS:
    Network1 - original

OUTPUT PARAMETERS:
    Network2 - copy

  -- ALGLIB --
     Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
void mlpcopy(const multilayerperceptron &network1, multilayerperceptron &network2);


/*************************************************************************
This function copies tunable parameters (weights/means/sigmas) from one
network to another with the same architecture. It performs some rudimentary
checks that architectures are the same, and throws an exception if the
check fails.

It is intended for fast copying of states between two networks which are
known to have the same geometry.

INPUT PARAMETERS:
    Network1 - source, must be correctly initialized
    Network2 - target, must have same architecture

OUTPUT PARAMETERS:
    Network2 - network state is copied from source to target

  -- ALGLIB --
     Copyright 20.06.2013 by Bochkanov Sergey
*************************************************************************/
void mlpcopytunableparameters(const multilayerperceptron &network1, const multilayerperceptron &network2);


/*************************************************************************
Randomization of neural network weights

  -- ALGLIB --
     Copyright 06.11.2007 by Bochkanov Sergey
*************************************************************************/
void mlprandomize(const multilayerperceptron &network);


/*************************************************************************
Randomization of neural network weights and standardizer

  -- ALGLIB --
     Copyright 10.03.2008 by Bochkanov Sergey
*************************************************************************/
void mlprandomizefull(const multilayerperceptron &network);


/*************************************************************************
Internal subroutine.

  -- ALGLIB --
     Copyright 30.03.2008 by Bochkanov Sergey
*************************************************************************/
void mlpinitpreprocessor(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize);


/*************************************************************************
Returns information about initialized network: number of inputs, outputs,
weights.

  -- ALGLIB --
     Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
void mlpproperties(const multilayerperceptron &network, ae_int_t &nin, ae_int_t &nout, ae_int_t &wcount);
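

/*************************************************************************
Usage sketch: queries the geometry of a freshly created network. The
example_* helper name is illustrative, not part of the ALGLIB API.
*************************************************************************/
static inline void example_mlpproperties()
{
    multilayerperceptron net;
    mlpcreate1(2, 5, 1, net);
    ae_int_t nin, nout, wcount;
    mlpproperties(net, nin, nout, wcount);
    // here nin=2 and nout=1; wcount is the total number of tunable weights
}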


/*************************************************************************
Returns number of inputs.

  -- ALGLIB --
     Copyright 19.10.2011 by Bochkanov Sergey
*************************************************************************/
ae_int_t mlpgetinputscount(const multilayerperceptron &network);


/*************************************************************************
Returns number of outputs.

  -- ALGLIB --
     Copyright 19.10.2011 by Bochkanov Sergey
*************************************************************************/
ae_int_t mlpgetoutputscount(const multilayerperceptron &network);


/*************************************************************************
Returns number of weights.

  -- ALGLIB --
     Copyright 19.10.2011 by Bochkanov Sergey
*************************************************************************/
ae_int_t mlpgetweightscount(const multilayerperceptron &network);


/*************************************************************************
Tells whether network is SOFTMAX-normalized (i.e. classifier) or not.

  -- ALGLIB --
     Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
bool mlpissoftmax(const multilayerperceptron &network);


/*************************************************************************
This function returns total number of layers (including input, hidden and
output layers).

  -- ALGLIB --
     Copyright 25.03.2011 by Bochkanov Sergey
*************************************************************************/
ae_int_t mlpgetlayerscount(const multilayerperceptron &network);


/*************************************************************************
This function returns size of K-th layer.

K=0 corresponds to input layer, K=CNT-1 corresponds to output layer.

Size of the output layer is always equal to the number of outputs, although
for a softmax-normalized network the last neuron doesn't have any
connections - it is just zero.

  -- ALGLIB --
     Copyright 25.03.2011 by Bochkanov Sergey
*************************************************************************/
ae_int_t mlpgetlayersize(const multilayerperceptron &network, const ae_int_t k);


/*************************************************************************
This function returns offset/scaling coefficients for I-th input of the
network.

INPUT PARAMETERS:
    Network - network
    I       - input index

OUTPUT PARAMETERS:
    Mean    - mean term
    Sigma   - sigma term, guaranteed to be nonzero.

I-th input is passed through linear transformation
    IN[i] = (IN[i]-Mean)/Sigma
before feeding to the network

  -- ALGLIB --
     Copyright 25.03.2011 by Bochkanov Sergey
*************************************************************************/
void mlpgetinputscaling(const multilayerperceptron &network, const ae_int_t i, double &mean, double &sigma);


/*************************************************************************
This function returns offset/scaling coefficients for I-th output of the
network.

INPUT PARAMETERS:
    Network - network
    I       - output index

OUTPUT PARAMETERS:
    Mean    - mean term
    Sigma   - sigma term, guaranteed to be nonzero.

I-th output is passed through linear transformation
    OUT[i] = OUT[i]*Sigma+Mean
before returning it to user. In case we have SOFTMAX-normalized network,
we return (Mean,Sigma)=(0.0,1.0).

  -- ALGLIB --
     Copyright 25.03.2011 by Bochkanov Sergey
*************************************************************************/
void mlpgetoutputscaling(const multilayerperceptron &network, const ae_int_t i, double &mean, double &sigma);


/*************************************************************************
This function returns information about Ith neuron of Kth layer

INPUT PARAMETERS:
    Network - network
    K       - layer index
    I       - neuron index (within layer)

OUTPUT PARAMETERS:
    FKind   - activation function type (used by MLPActivationFunction())
              this value is zero for input or linear neurons
    Threshold - also called offset, bias
              zero for input neurons

NOTE: this function throws an exception if the layer or neuron with given
index does not exist.

  -- ALGLIB --
     Copyright 25.03.2011 by Bochkanov Sergey
*************************************************************************/
void mlpgetneuroninfo(const multilayerperceptron &network, const ae_int_t k, const ae_int_t i, ae_int_t &fkind, double &threshold);


/*************************************************************************
This function returns information about connection from I0-th neuron of
K0-th layer to I1-th neuron of K1-th layer.

INPUT PARAMETERS:
    Network - network
    K0      - layer index
    I0      - neuron index (within layer)
    K1      - layer index
    I1      - neuron index (within layer)

RESULT:
    connection weight (zero for non-existent connections)

This function:
1. throws an exception if the layer or neuron with given index does not
   exist.
2. returns zero if neurons exist, but there is no connection between them

  -- ALGLIB --
     Copyright 25.03.2011 by Bochkanov Sergey
*************************************************************************/
double mlpgetweight(const multilayerperceptron &network, const ae_int_t k0, const ae_int_t i0, const ae_int_t k1, const ae_int_t i1);


/*************************************************************************
This function sets offset/scaling coefficients for I-th input of the
network.

INPUT PARAMETERS:
    Network - network
    I       - input index
    Mean    - mean term
    Sigma   - sigma term (if zero, will be replaced by 1.0)

NOTE: I-th input is passed through linear transformation
    IN[i] = (IN[i]-Mean)/Sigma
before feeding to the network. This function sets Mean and Sigma.

  -- ALGLIB --
     Copyright 25.03.2011 by Bochkanov Sergey
*************************************************************************/
void mlpsetinputscaling(const multilayerperceptron &network, const ae_int_t i, const double mean, const double sigma);


/*************************************************************************
This function sets offset/scaling coefficients for I-th output of the
network.

INPUT PARAMETERS:
    Network - network
    I       - output index
    Mean    - mean term
    Sigma   - sigma term (if zero, will be replaced by 1.0)

NOTE: I-th output is passed through linear transformation
    OUT[i] = OUT[i]*Sigma+Mean
before returning it to user. This function sets Sigma/Mean. In case we
have SOFTMAX-normalized network, you can not set (Sigma,Mean) to anything
other than (0.0,1.0) - this function will throw an exception.

  -- ALGLIB --
     Copyright 25.03.2011 by Bochkanov Sergey
*************************************************************************/
void mlpsetoutputscaling(const multilayerperceptron &network, const ae_int_t i, const double mean, const double sigma);
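

/*************************************************************************
Usage sketch: sets and reads back the input standardization pair for the
first input. The example_* helper name and the numeric values are
illustrative assumptions, not part of the ALGLIB API.
*************************************************************************/
static inline void example_mlpscaling()
{
    multilayerperceptron net;
    mlpcreate1(2, 5, 1, net);
    // inputs will be standardized as IN[i] = (IN[i]-Mean)/Sigma
    mlpsetinputscaling(net, 0, 10.0, 2.0);
    double mean, sigma;
    mlpgetinputscaling(net, 0, mean, sigma);    // mean=10.0, sigma=2.0
}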


/*************************************************************************
This function modifies information about Ith neuron of Kth layer

INPUT PARAMETERS:
    Network - network
    K       - layer index
    I       - neuron index (within layer)
    FKind   - activation function type (used by MLPActivationFunction())
              this value must be zero for input neurons
              (you can not set activation function for input neurons)
    Threshold - also called offset, bias
              this value must be zero for input neurons
              (you can not set threshold for input neurons)

NOTES:
1. this function throws an exception if the layer or neuron with given
   index does not exist.
2. this function also throws an exception when you try to set non-linear
   activation function for input neurons (any kind of network) or for output
   neurons of classifier network.
3. this function throws an exception when you try to set non-zero threshold
   for input neurons (any kind of network).

  -- ALGLIB --
     Copyright 25.03.2011 by Bochkanov Sergey
*************************************************************************/
void mlpsetneuroninfo(const multilayerperceptron &network, const ae_int_t k, const ae_int_t i, const ae_int_t fkind, const double threshold);


/*************************************************************************
This function modifies information about connection from I0-th neuron of
K0-th layer to I1-th neuron of K1-th layer.

INPUT PARAMETERS:
    Network - network
    K0      - layer index
    I0      - neuron index (within layer)
    K1      - layer index
    I1      - neuron index (within layer)
    W       - connection weight (must be zero for non-existent
              connections)

This function:
1. throws an exception if the layer or neuron with given index does not
   exist.
2. throws an exception if you try to set non-zero weight for non-existent
   connection

  -- ALGLIB --
     Copyright 25.03.2011 by Bochkanov Sergey
*************************************************************************/
void mlpsetweight(const multilayerperceptron &network, const ae_int_t k0, const ae_int_t i0, const ae_int_t k1, const ae_int_t i1, const double w);
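

/*************************************************************************
Usage sketch: reads and overwrites one connection weight. The example_*
helper name is illustrative, not part of the ALGLIB API; the indexes
assume adjacent layers of a network created by mlpcreate1() above.
*************************************************************************/
static inline void example_mlpweights()
{
    multilayerperceptron net;
    mlpcreate1(2, 3, 1, net);
    // connection from neuron 0 of layer 0 (input) to neuron 0 of layer 1 (hidden)
    double w = mlpgetweight(net, 0, 0, 1, 0);
    mlpsetweight(net, 0, 0, 1, 0, w + 0.5);
}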


/*************************************************************************
Neural network activation function

INPUT PARAMETERS:
    NET     - neuron input
    K       - function index (zero for linear function)

OUTPUT PARAMETERS:
    F       - function
    DF      - its derivative
    D2F     - its second derivative

  -- ALGLIB --
     Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
void mlpactivationfunction(const double net, const ae_int_t k, double &f, double &df, double &d2f);
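

/*************************************************************************
Usage sketch: evaluates the linear activation kind (K=0, per the note
above) together with its derivatives. The example_* helper name is
illustrative, not part of the ALGLIB API.
*************************************************************************/
static inline void example_mlpactivationfunction()
{
    double f, df, d2f;
    // K=0 is the linear function, so f=net, df=1, d2f=0;
    // non-zero K values select the library's non-linear kinds
    mlpactivationfunction(0.5, 0, f, df, d2f);
}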


/*************************************************************************
Processing

INPUT PARAMETERS:
    Network - neural network
    X       - input vector, array[0..NIn-1].

OUTPUT PARAMETERS:
    Y       - result. Regression estimate when solving regression task,
              vector of posterior probabilities for classification task.

See also MLPProcessI

  -- ALGLIB --
     Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
void mlpprocess(const multilayerperceptron &network, const real_1d_array &x, real_1d_array &y);
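

/*************************************************************************
Usage sketch: feeds one input vector through an (untrained) network. The
example_* helper name and input values are illustrative assumptions, not
part of the ALGLIB API.
*************************************************************************/
static inline void example_mlpprocess()
{
    multilayerperceptron net;
    mlpcreate1(2, 5, 1, net);
    real_1d_array x = "[0.1, 0.9]";
    real_1d_array y;
    y.setlength(1);             // preallocated to NOut=1
    mlpprocess(net, x, y);
    // y[0] holds the network output for x
}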


/*************************************************************************
'interactive' variant of MLPProcess for languages like Python which
support constructs like "Y = MLPProcess(NN,X)" and interactive mode of the
interpreter.

This function allocates new array on each call, so it is significantly
slower than its 'non-interactive' counterpart, but it is more convenient
when you call it from command line.

  -- ALGLIB --
     Copyright 21.09.2010 by Bochkanov Sergey
*************************************************************************/
void mlpprocessi(const multilayerperceptron &network, const real_1d_array &x, real_1d_array &y);
1795 
1796 /*************************************************************************
1797 Error of the neural network on dataset.
1798 
1799 
1800 FOR USERS OF COMMERCIAL EDITION:
1801 
1802  ! Commercial version of ALGLIB includes two important improvements of
1803  ! this function:
1804  ! * multicore support (C++ and C# computational cores)
1805  ! * SSE support
1806  !
1807  ! First improvement gives close-to-linear speedup on multicore systems.
1808  ! Second improvement gives constant speedup (2-3x, depending on your CPU)
1809  !
1810  ! In order to use multicore features you have to:
1811  ! * use commercial version of ALGLIB
1812  ! * call this function with "smp_" prefix, which indicates that
1813  ! multicore code will be used (for multicore support)
1814  !
1815  ! In order to use SSE features you have to:
1816  ! * use commercial version of ALGLIB on Intel processors
1817  ! * use C++ computational core
1818  !
1819  ! This note is given for users of commercial edition; if you use GPL
1820  ! edition, you still will be able to call smp-version of this function,
1821  ! but all computations will be done serially.
1822  !
1823  ! We recommend you to carefully read ALGLIB Reference Manual, section
1824  ! called 'SMP support', before using parallel version of this function.
1825 
1826 
1827 INPUT PARAMETERS:
1828  Network - neural network;
1829  XY - training set, see below for information on the
1830  training set format;
1831  NPoints - points count.
1832 
1833 RESULT:
1834  sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2)
1835 
1836 DATASET FORMAT:
1837 
1838 This function uses two different dataset formats - one for regression
1839 networks, another one for classification networks.
1840 
1841 For regression networks with NIn inputs and NOut outputs following dataset
1842 format is used:
1843 * dataset is given by NPoints*(NIn+NOut) matrix
1844 * each row corresponds to one example
1845 * first NIn columns are inputs, next NOut columns are outputs
1846 
1847 For classification networks with NIn inputs and NClasses clases following
1848 dataset format is used:
1849 * dataset is given by NPoints*(NIn+1) matrix
1850 * each row corresponds to one example
1851 * first NIn columns are inputs, last column stores class number (from 0 to
1852  NClasses-1).
1853 
1854  -- ALGLIB --
1855  Copyright 04.11.2007 by Bochkanov Sergey
1856 *************************************************************************/
1857 double mlperror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints);
1858 double smp_mlperror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints);
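/*************************************************************************
EXAMPLE (SKETCH):

A minimal sketch of MLPError on a dense regression dataset in the format
described above. NIn=2 and NOut=1 are assumed; the literal matrix and all
names are illustrative:

    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        multilayerperceptron net;
        mlpcreate1(2, 5, 1, net);
        // NPoints*(NIn+NOut) matrix: 2 inputs, then 1 output, per row
        real_2d_array xy = "[[0,0,0],[0,1,1],[1,0,1],[1,1,0]]";
        double e = mlperror(net, xy, 4); // SUM(sqr(y[i]-desired_y[i])/2)
        return 0;
    }
*************************************************************************/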
1859 
1860 
1861 /*************************************************************************
1862 Error of the neural network on dataset given by sparse matrix.
1863 
1864 
1865 FOR USERS OF COMMERCIAL EDITION:
1866 
1867  ! Commercial version of ALGLIB includes two important improvements of
1868  ! this function:
1869  ! * multicore support (C++ and C# computational cores)
1870  ! * SSE support
1871  !
1872  ! First improvement gives close-to-linear speedup on multicore systems.
1873  ! Second improvement gives constant speedup (2-3x, depending on your CPU)
1874  !
1875  ! In order to use multicore features you have to:
1876  ! * use commercial version of ALGLIB
1877  ! * call this function with "smp_" prefix, which indicates that
1878  ! multicore code will be used (for multicore support)
1879  !
1880  ! In order to use SSE features you have to:
1881  ! * use commercial version of ALGLIB on Intel processors
1882  ! * use C++ computational core
1883  !
1884  ! This note is given for users of the commercial edition; if you use the
1885  ! GPL edition, you will still be able to call the smp-version of this
1886  ! function, but all computations will be done serially.
1887  !
1888  ! We recommend that you carefully read the ALGLIB Reference Manual section
1889  ! called 'SMP support' before using the parallel version of this function.
1890 
1891 
1892 INPUT PARAMETERS:
1893  Network - neural network
1894  XY - training set, see below for information on the
1895  training set format. This function checks the dataset
1896  for correctness (no NANs/INFs, valid class numbers)
1897  and throws an exception when an incorrect dataset is
1898  passed. The sparse matrix must use CRS format for
1899  storage.
1900  NPoints - points count, >=0
1901 
1902 RESULT:
1903  sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2)
1904 
1905 DATASET FORMAT:
1906 
1907 This function uses two different dataset formats - one for regression
1908 networks, another one for classification networks.
1909 
1910 For regression networks with NIn inputs and NOut outputs, the following
1911 dataset format is used:
1912 * dataset is given by NPoints*(NIn+NOut) matrix
1913 * each row corresponds to one example
1914 * first NIn columns are inputs, next NOut columns are outputs
1915 
1916 For classification networks with NIn inputs and NClasses classes, the
1917 following dataset format is used:
1918 * dataset is given by NPoints*(NIn+1) matrix
1919 * each row corresponds to one example
1920 * first NIn columns are inputs, last column stores class number (from 0 to
1921  NClasses-1).
1922 
1923  -- ALGLIB --
1924  Copyright 23.07.2012 by Bochkanov Sergey
1925 *************************************************************************/
1926 double mlperrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints);
1927 double smp_mlperrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints);
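/*************************************************************************
EXAMPLE (SKETCH):

A minimal sketch of building a CRS matrix for MLPErrorSparse. The same
illustrative NIn=2/NOut=1 regression layout as in the dense sketch above
is assumed, and only nonzero dataset entries are stored:

    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        multilayerperceptron net;
        mlpcreate1(2, 5, 1, net);
        sparsematrix xy;
        sparsecreate(4, 3, 0, xy);       // 4 samples, NIn+NOut = 3 columns
        sparseset(xy, 1, 1, 1.0);        // row 1: inputs (0,1), output 1
        sparseset(xy, 1, 2, 1.0);
        sparseset(xy, 2, 0, 1.0);        // row 2: inputs (1,0), output 1
        sparseset(xy, 2, 2, 1.0);
        sparseset(xy, 3, 0, 1.0);        // row 3: inputs (1,1), output 0
        sparseset(xy, 3, 1, 1.0);
        sparseconverttocrs(xy);          // CRS storage is required here
        double e = mlperrorsparse(net, xy, 4);
        return 0;
    }
*************************************************************************/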
1928 
1929 
1930 /*************************************************************************
1931 Natural error function for neural network, internal subroutine.
1932 
1933 NOTE: this function is single-threaded. Unlike the other error functions, it
1934 receives no speed-up from being executed in SMP mode.
1935 
1936  -- ALGLIB --
1937  Copyright 04.11.2007 by Bochkanov Sergey
1938 *************************************************************************/
1939 double mlperrorn(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize);
1940 
1941 
1942 /*************************************************************************
1943 Classification error of the neural network on dataset.
1944 
1945 
1946 FOR USERS OF COMMERCIAL EDITION:
1947 
1948  ! Commercial version of ALGLIB includes two important improvements of
1949  ! this function:
1950  ! * multicore support (C++ and C# computational cores)
1951  ! * SSE support
1952  !
1953  ! First improvement gives close-to-linear speedup on multicore systems.
1954  ! Second improvement gives constant speedup (2-3x depending on your CPU)
1955  !
1956  ! In order to use multicore features you have to:
1957  ! * use commercial version of ALGLIB
1958  ! * call this function with "smp_" prefix, which indicates that
1959  ! multicore code will be used (for multicore support)
1960  !
1961  ! In order to use SSE features you have to:
1962  ! * use commercial version of ALGLIB on Intel processors
1963  ! * use C++ computational core
1964  !
1965  ! This note is given for users of the commercial edition; if you use the
1966  ! GPL edition, you will still be able to call the smp-version of this
1967  ! function, but all computations will be done serially.
1968  !
1969  ! We recommend that you carefully read the ALGLIB Reference Manual section
1970  ! called 'SMP support' before using the parallel version of this function.
1971 
1972 
1973 INPUT PARAMETERS:
1974  Network - neural network;
1975  XY - training set, see below for information on the
1976  training set format;
1977  NPoints - points count.
1978 
1979 RESULT:
1980  classification error (number of misclassified cases)
1981 
1982 DATASET FORMAT:
1983 
1984 This function uses two different dataset formats - one for regression
1985 networks, another one for classification networks.
1986 
1987 For regression networks with NIn inputs and NOut outputs, the following
1988 dataset format is used:
1989 * dataset is given by NPoints*(NIn+NOut) matrix
1990 * each row corresponds to one example
1991 * first NIn columns are inputs, next NOut columns are outputs
1992 
1993 For classification networks with NIn inputs and NClasses classes, the
1994 following dataset format is used:
1995 * dataset is given by NPoints*(NIn+1) matrix
1996 * each row corresponds to one example
1997 * first NIn columns are inputs, last column stores class number (from 0 to
1998  NClasses-1).
1999 
2000  -- ALGLIB --
2001  Copyright 04.11.2007 by Bochkanov Sergey
2002 *************************************************************************/
2003 ae_int_t mlpclserror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints);
2004 ae_int_t smp_mlpclserror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints);
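/*************************************************************************
EXAMPLE (SKETCH):

A minimal sketch of MLPClsError with the classification dataset format
described above. A 2-input, 3-class network is assumed; values and names
are illustrative:

    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        multilayerperceptron net;
        mlpcreatec1(2, 5, 3, net);       // classifier: 2 inputs, 3 classes
        // NPoints*(NIn+1) matrix: last column holds the class index
        real_2d_array xy = "[[0.0,0.0,0],[0.0,1.0,1],[1.0,0.0,2]]";
        ae_int_t nmis = mlpclserror(net, xy, 3); // misclassified cases
        return 0;
    }
*************************************************************************/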
2005 
2006 
2007 /*************************************************************************
2008 Relative classification error on the test set.
2009 
2010 
2011 FOR USERS OF COMMERCIAL EDITION:
2012 
2013  ! Commercial version of ALGLIB includes two important improvements of
2014  ! this function:
2015  ! * multicore support (C++ and C# computational cores)
2016  ! * SSE support
2017  !
2018  ! First improvement gives close-to-linear speedup on multicore systems.
2019  ! Second improvement gives constant speedup (2-3x depending on your CPU)
2020  !
2021  ! In order to use multicore features you have to:
2022  ! * use commercial version of ALGLIB
2023  ! * call this function with "smp_" prefix, which indicates that
2024  ! multicore code will be used (for multicore support)
2025  !
2026  ! In order to use SSE features you have to:
2027  ! * use commercial version of ALGLIB on Intel processors
2028  ! * use C++ computational core
2029  !
2030  ! This note is given for users of the commercial edition; if you use the
2031  ! GPL edition, you will still be able to call the smp-version of this
2032  ! function, but all computations will be done serially.
2033  !
2034  ! We recommend that you carefully read the ALGLIB Reference Manual section
2035  ! called 'SMP support' before using the parallel version of this function.
2036 
2037 
2038 INPUT PARAMETERS:
2039  Network - neural network;
2040  XY - training set, see below for information on the
2041  training set format;
2042  NPoints - points count.
2043 
2044 RESULT:
2045 Percent of incorrectly classified cases. Works both for classifier
2046 networks and general purpose networks used as classifiers.
2047 
2048 DATASET FORMAT:
2049 
2050 This function uses two different dataset formats - one for regression
2051 networks, another one for classification networks.
2052 
2053 For regression networks with NIn inputs and NOut outputs, the following
2054 dataset format is used:
2055 * dataset is given by NPoints*(NIn+NOut) matrix
2056 * each row corresponds to one example
2057 * first NIn columns are inputs, next NOut columns are outputs
2058 
2059 For classification networks with NIn inputs and NClasses classes, the
2060 following dataset format is used:
2061 * dataset is given by NPoints*(NIn+1) matrix
2062 * each row corresponds to one example
2063 * first NIn columns are inputs, last column stores class number (from 0 to
2064  NClasses-1).
2065 
2066  -- ALGLIB --
2067  Copyright 25.12.2008 by Bochkanov Sergey
2068 *************************************************************************/
2069 double mlprelclserror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints);
2070 double smp_mlprelclserror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints);
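/*************************************************************************
EXAMPLE (SKETCH):

Continuing the illustrative classifier sketch above: MLPClsError returns
a raw count of misclassified cases, while MLPRelClsError reports the same
quantity in relative form (see RESULT above):

    ae_int_t nmis = mlpclserror(net, xy, 3);   // absolute count, 0..NPoints
    double rel = mlprelclserror(net, xy, 3);   // same cases, relative form
*************************************************************************/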
2071 
2072 
2073 /*************************************************************************
2074 Relative classification error on the test set given by sparse matrix.
2075 
2076 
2077 FOR USERS OF COMMERCIAL EDITION:
2078 
2079  ! Commercial version of ALGLIB includes two important improvements of
2080  ! this function:
2081  ! * multicore support (C++ and C# computational cores)
2082  ! * SSE support
2083  !
2084  ! First improvement gives close-to-linear speedup on multicore systems.
2085  ! Second improvement gives constant speedup (2-3x depending on your CPU)
2086  !
2087  ! In order to use multicore features you have to:
2088  ! * use commercial version of ALGLIB
2089  ! * call this function with "smp_" prefix, which indicates that
2090  ! multicore code will be used (for multicore support)
2091  !
2092  ! In order to use SSE features you have to:
2093  ! * use commercial version of ALGLIB on Intel processors
2094  ! * use C++ computational core
2095  !
2096  ! This note is given for users of the commercial edition; if you use the
2097  ! GPL edition, you will still be able to call the smp-version of this
2098  ! function, but all computations will be done serially.
2099  !
2100  ! We recommend that you carefully read the ALGLIB Reference Manual section
2101  ! called 'SMP support' before using the parallel version of this function.
2102 
2103 
2104 INPUT PARAMETERS:
2105  Network - neural network;
2106  XY - training set, see below for information on the
2107  training set format. The sparse matrix must use CRS
2108  format for storage.
2109  NPoints - points count, >=0.
2110 
2111 RESULT:
2112 Percent of incorrectly classified cases. Works both for classifier
2113 networks and general purpose networks used as classifiers.
2114 
2115 DATASET FORMAT:
2116 
2117 This function uses two different dataset formats - one for regression
2118 networks, another one for classification networks.
2119 
2120 For regression networks with NIn inputs and NOut outputs, the following
2121 dataset format is used:
2122 * dataset is given by NPoints*(NIn+NOut) matrix
2123 * each row corresponds to one example
2124 * first NIn columns are inputs, next NOut columns are outputs
2125 
2126 For classification networks with NIn inputs and NClasses classes, the
2127 following dataset format is used:
2128 * dataset is given by NPoints*(NIn+1) matrix
2129 * each row corresponds to one example
2130 * first NIn columns are inputs, last column stores class number (from 0 to
2131  NClasses-1).
2132 
2133  -- ALGLIB --
2134  Copyright 09.08.2012 by Bochkanov Sergey
2135 *************************************************************************/
2136 double mlprelclserrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints);
2137 double smp_mlprelclserrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints);
2138 
2139 
2140 /*************************************************************************
2141 Average cross-entropy (in bits per element) on the test set.
2142 
2143 
2144 FOR USERS OF COMMERCIAL EDITION:
2145 
2146  ! Commercial version of ALGLIB includes two important improvements of
2147  ! this function:
2148  ! * multicore support (C++ and C# computational cores)
2149  ! * SSE support
2150  !
2151  ! First improvement gives close-to-linear speedup on multicore systems.
2152  ! Second improvement gives constant speedup (2-3x depending on your CPU)
2153  !
2154  ! In order to use multicore features you have to:
2155  ! * use commercial version of ALGLIB
2156  ! * call this function with "smp_" prefix, which indicates that
2157  ! multicore code will be used (for multicore support)
2158  !
2159  ! In order to use SSE features you have to:
2160  ! * use commercial version of ALGLIB on Intel processors
2161  ! * use C++ computational core
2162  !
2163  ! This note is given for users of the commercial edition; if you use the
2164  ! GPL edition, you will still be able to call the smp-version of this
2165  ! function, but all computations will be done serially.
2166  !
2167  ! We recommend that you carefully read the ALGLIB Reference Manual section
2168  ! called 'SMP support' before using the parallel version of this function.
2169 
2170 
2171 INPUT PARAMETERS:
2172  Network - neural network;
2173  XY - training set, see below for information on the
2174  training set format;
2175  NPoints - points count.
2176 
2177 RESULT:
2178 CrossEntropy/(NPoints*LN(2)).
2179 Zero if network solves regression task.
2180 
2181 DATASET FORMAT:
2182 
2183 This function uses two different dataset formats - one for regression
2184 networks, another one for classification networks.
2185 
2186 For regression networks with NIn inputs and NOut outputs, the following
2187 dataset format is used:
2188 * dataset is given by NPoints*(NIn+NOut) matrix
2189 * each row corresponds to one example
2190 * first NIn columns are inputs, next NOut columns are outputs
2191 
2192 For classification networks with NIn inputs and NClasses classes, the
2193 following dataset format is used:
2194 * dataset is given by NPoints*(NIn+1) matrix
2195 * each row corresponds to one example
2196 * first NIn columns are inputs, last column stores class number (from 0 to
2197  NClasses-1).
2198 
2199  -- ALGLIB --
2200  Copyright 08.01.2009 by Bochkanov Sergey
2201 *************************************************************************/
2202 double mlpavgce(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints);
2203 double smp_mlpavgce(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints);
2204 
2205 
2206 /*************************************************************************
2207 Average cross-entropy (in bits per element) on the test set given by
2208 sparse matrix.
2209 
2210 
2211 FOR USERS OF COMMERCIAL EDITION:
2212 
2213  ! Commercial version of ALGLIB includes two important improvements of
2214  ! this function:
2215  ! * multicore support (C++ and C# computational cores)
2216  ! * SSE support
2217  !
2218  ! First improvement gives close-to-linear speedup on multicore systems.
2219  ! Second improvement gives constant speedup (2-3x depending on your CPU)
2220  !
2221  ! In order to use multicore features you have to:
2222  ! * use commercial version of ALGLIB
2223  ! * call this function with "smp_" prefix, which indicates that
2224  ! multicore code will be used (for multicore support)
2225  !
2226  ! In order to use SSE features you have to:
2227  ! * use commercial version of ALGLIB on Intel processors
2228  ! * use C++ computational core
2229  !
2230  ! This note is given for users of the commercial edition; if you use the
2231  ! GPL edition, you will still be able to call the smp-version of this
2232  ! function, but all computations will be done serially.
2233  !
2234  ! We recommend that you carefully read the ALGLIB Reference Manual section
2235  ! called 'SMP support' before using the parallel version of this function.
2236 
2237 
2238 INPUT PARAMETERS:
2239  Network - neural network;
2240  XY - training set, see below for information on the
2241  training set format. This function checks the dataset
2242  for correctness (no NANs/INFs, valid class numbers)
2243  and throws an exception when an incorrect dataset is
2244  passed. The sparse matrix must use CRS format for
2245  storage.
2246  NPoints - points count, >=0.
2247 
2248 RESULT:
2249 CrossEntropy/(NPoints*LN(2)).
2250 Zero if network solves regression task.
2251 
2252 DATASET FORMAT:
2253 
2254 This function uses two different dataset formats - one for regression
2255 networks, another one for classification networks.
2256 
2257 For regression networks with NIn inputs and NOut outputs, the following
2258 dataset format is used:
2259 * dataset is given by NPoints*(NIn+NOut) matrix
2260 * each row corresponds to one example
2261 * first NIn columns are inputs, next NOut columns are outputs
2262 
2263 For classification networks with NIn inputs and NClasses classes, the
2264 following dataset format is used:
2265 * dataset is given by NPoints*(NIn+1) matrix
2266 * each row corresponds to one example
2267 * first NIn columns are inputs, last column stores class number (from 0 to
2268  NClasses-1).
2269 
2270  -- ALGLIB --
2271  Copyright 09.08.2012 by Bochkanov Sergey
2272 *************************************************************************/
2273 double mlpavgcesparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints);
2274 double smp_mlpavgcesparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints);
2275 
2276 
2277 /*************************************************************************
2278 RMS error on the test set.
2279 
2280 
2281 FOR USERS OF COMMERCIAL EDITION:
2282 
2283  ! Commercial version of ALGLIB includes two important improvements of
2284  ! this function:
2285  ! * multicore support (C++ and C# computational cores)
2286  ! * SSE support
2287  !
2288  ! First improvement gives close-to-linear speedup on multicore systems.
2289  ! Second improvement gives constant speedup (2-3x depending on your CPU)
2290  !
2291  ! In order to use multicore features you have to:
2292  ! * use commercial version of ALGLIB
2293  ! * call this function with "smp_" prefix, which indicates that
2294  ! multicore code will be used (for multicore support)
2295  !
2296  ! In order to use SSE features you have to:
2297  ! * use commercial version of ALGLIB on Intel processors
2298  ! * use C++ computational core
2299  !
2300  ! This note is given for users of the commercial edition; if you use the
2301  ! GPL edition, you will still be able to call the smp-version of this
2302  ! function, but all computations will be done serially.
2303  !
2304  ! We recommend that you carefully read the ALGLIB Reference Manual section
2305  ! called 'SMP support' before using the parallel version of this function.
2306 
2307 
2308 INPUT PARAMETERS:
2309  Network - neural network;
2310  XY - training set, see below for information on the
2311  training set format;
2312  NPoints - points count.
2313 
2314 RESULT:
2315 Root mean square error. Its meaning for the regression task is obvious. For
2316 the classification task, RMS error is the error made when estimating
2317 posterior probabilities.
2318 
2319 DATASET FORMAT:
2320 
2321 This function uses two different dataset formats - one for regression
2322 networks, another one for classification networks.
2323 
2324 For regression networks with NIn inputs and NOut outputs, the following
2325 dataset format is used:
2326 * dataset is given by NPoints*(NIn+NOut) matrix
2327 * each row corresponds to one example
2328 * first NIn columns are inputs, next NOut columns are outputs
2329 
2330 For classification networks with NIn inputs and NClasses classes, the
2331 following dataset format is used:
2332 * dataset is given by NPoints*(NIn+1) matrix
2333 * each row corresponds to one example
2334 * first NIn columns are inputs, last column stores class number (from 0 to
2335  NClasses-1).
2336 
2337  -- ALGLIB --
2338  Copyright 04.11.2007 by Bochkanov Sergey
2339 *************************************************************************/
2340 double mlprmserror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints);
2341 double smp_mlprmserror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints);
2342 
2343 
2344 /*************************************************************************
2345 RMS error on the test set given by sparse matrix.
2346 
2347 
2348 FOR USERS OF COMMERCIAL EDITION:
2349 
2350  ! Commercial version of ALGLIB includes two important improvements of
2351  ! this function:
2352  ! * multicore support (C++ and C# computational cores)
2353  ! * SSE support
2354  !
2355  ! First improvement gives close-to-linear speedup on multicore systems.
2356  ! Second improvement gives constant speedup (2-3x depending on your CPU)
2357  !
2358  ! In order to use multicore features you have to:
2359  ! * use commercial version of ALGLIB
2360  ! * call this function with "smp_" prefix, which indicates that
2361  ! multicore code will be used (for multicore support)
2362  !
2363  ! In order to use SSE features you have to:
2364  ! * use commercial version of ALGLIB on Intel processors
2365  ! * use C++ computational core
2366  !
2367  ! This note is given for users of the commercial edition; if you use the
2368  ! GPL edition, you will still be able to call the smp-version of this
2369  ! function, but all computations will be done serially.
2370  !
2371  ! We recommend that you carefully read the ALGLIB Reference Manual section
2372  ! called 'SMP support' before using the parallel version of this function.
2373 
2374 
2375 INPUT PARAMETERS:
2376  Network - neural network;
2377  XY - training set, see below for information on the
2378  training set format. This function checks the dataset
2379  for correctness (no NANs/INFs, valid class numbers)
2380  and throws an exception when an incorrect dataset is
2381  passed. The sparse matrix must use CRS format for
2382  storage.
2383  NPoints - points count, >=0.
2384 
2385 RESULT:
2386 Root mean square error. Its meaning for the regression task is obvious. For
2387 the classification task, RMS error is the error made when estimating
2388 posterior probabilities.
2389 
2390 DATASET FORMAT:
2391 
2392 This function uses two different dataset formats - one for regression
2393 networks, another one for classification networks.
2394 
2395 For regression networks with NIn inputs and NOut outputs, the following
2396 dataset format is used:
2397 * dataset is given by NPoints*(NIn+NOut) matrix
2398 * each row corresponds to one example
2399 * first NIn columns are inputs, next NOut columns are outputs
2400 
2401 For classification networks with NIn inputs and NClasses classes, the
2402 following dataset format is used:
2403 * dataset is given by NPoints*(NIn+1) matrix
2404 * each row corresponds to one example
2405 * first NIn columns are inputs, last column stores class number (from 0 to
2406  NClasses-1).
2407 
2408  -- ALGLIB --
2409  Copyright 09.08.2012 by Bochkanov Sergey
2410 *************************************************************************/
2411 double mlprmserrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints);
2412 double smp_mlprmserrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints);
2413 
2414 
2415 /*************************************************************************
2416 Average absolute error on the test set.
2417 
2418 
2419 FOR USERS OF COMMERCIAL EDITION:
2420 
2421  ! Commercial version of ALGLIB includes two important improvements of
2422  ! this function:
2423  ! * multicore support (C++ and C# computational cores)
2424  ! * SSE support
2425  !
2426  ! First improvement gives close-to-linear speedup on multicore systems.
2427  ! Second improvement gives constant speedup (2-3x depending on your CPU)
2428  !
2429  ! In order to use multicore features you have to:
2430  ! * use commercial version of ALGLIB
2431  ! * call this function with "smp_" prefix, which indicates that
2432  ! multicore code will be used (for multicore support)
2433  !
2434  ! In order to use SSE features you have to:
2435  ! * use commercial version of ALGLIB on Intel processors
2436  ! * use C++ computational core
2437  !
2438  ! This note is given for users of the commercial edition; if you use the
2439  ! GPL edition, you will still be able to call the smp-version of this
2440  ! function, but all computations will be done serially.
2441  !
2442  ! We recommend that you carefully read the ALGLIB Reference Manual section
2443  ! called 'SMP support' before using the parallel version of this function.
2444 
2445 
2446 INPUT PARAMETERS:
2447  Network - neural network;
2448  XY - training set, see below for information on the
2449  training set format;
2450  NPoints - points count.
2451 
2452 RESULT:
2453 Its meaning for the regression task is obvious. For the classification task,
2454 it is the average error made when estimating posterior probabilities.
2455 
2456 DATASET FORMAT:
2457 
2458 This function uses two different dataset formats - one for regression
2459 networks, another one for classification networks.
2460 
2461 For regression networks with NIn inputs and NOut outputs, the following
2462 dataset format is used:
2463 * dataset is given by NPoints*(NIn+NOut) matrix
2464 * each row corresponds to one example
2465 * first NIn columns are inputs, next NOut columns are outputs
2466 
2467 For classification networks with NIn inputs and NClasses classes, the
2468 following dataset format is used:
2469 * dataset is given by NPoints*(NIn+1) matrix
2470 * each row corresponds to one example
2471 * first NIn columns are inputs, last column stores class number (from 0 to
2472  NClasses-1).
2473 
2474  -- ALGLIB --
2475  Copyright 11.03.2008 by Bochkanov Sergey
2476 *************************************************************************/
2477 double mlpavgerror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints);
2478 double smp_mlpavgerror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints);
2479 
2480 
2481 /*************************************************************************
2482 Average absolute error on the test set given by sparse matrix.
2483 
2484 
2485 FOR USERS OF COMMERCIAL EDITION:
2486 
2487  ! Commercial version of ALGLIB includes two important improvements of
2488  ! this function:
2489  ! * multicore support (C++ and C# computational cores)
2490  ! * SSE support
2491  !
2492  ! First improvement gives close-to-linear speedup on multicore systems.
2493  ! Second improvement gives constant speedup (2-3x depending on your CPU)
2494  !
2495  ! In order to use multicore features you have to:
2496  ! * use commercial version of ALGLIB
2497  ! * call this function with "smp_" prefix, which indicates that
2498  ! multicore code will be used (for multicore support)
2499  !
2500  ! In order to use SSE features you have to:
2501  ! * use commercial version of ALGLIB on Intel processors
2502  ! * use C++ computational core
2503  !
2504  ! This note is given for users of the commercial edition; if you use the
2505  ! GPL edition, you will still be able to call the smp-version of this
2506  ! function, but all computations will be done serially.
2507  !
2508  ! We recommend that you carefully read the ALGLIB Reference Manual section
2509  ! called 'SMP support' before using the parallel version of this function.
2510 
2511 
2512 INPUT PARAMETERS:
2513  Network - neural network;
2514  XY - training set, see below for information on the
2515  training set format. This function checks the dataset
2516  for correctness (no NANs/INFs, valid class numbers)
2517  and throws an exception when an incorrect dataset is
2518  passed. The sparse matrix must use CRS format for
2519  storage.
2520  NPoints - points count, >=0.
2521 
2522 RESULT:
2523 Its meaning for the regression task is obvious. For the classification task,
2524 it is the average error made when estimating posterior probabilities.
2525 
2526 DATASET FORMAT:
2527 
2528 This function uses two different dataset formats - one for regression
2529 networks, another one for classification networks.
2530 
2531 For regression networks with NIn inputs and NOut outputs, the following
2532 dataset format is used:
2533 * dataset is given by NPoints*(NIn+NOut) matrix
2534 * each row corresponds to one example
2535 * first NIn columns are inputs, next NOut columns are outputs
2536 
2537 For classification networks with NIn inputs and NClasses classes, the
2538 following dataset format is used:
2539 * dataset is given by NPoints*(NIn+1) matrix
2540 * each row corresponds to one example
2541 * first NIn columns are inputs, last column stores class number (from 0 to
2542  NClasses-1).
2543 
2544  -- ALGLIB --
2545  Copyright 09.08.2012 by Bochkanov Sergey
2546 *************************************************************************/
2547 double mlpavgerrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints);
2548 double smp_mlpavgerrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints);
2549 
2550 
2551 /*************************************************************************
2552 Average relative error on the test set.
2553 
2554 
2555 FOR USERS OF COMMERCIAL EDITION:
2556 
2557  ! Commercial version of ALGLIB includes two important improvements of
2558  ! this function:
2559  ! * multicore support (C++ and C# computational cores)
2560  ! * SSE support
2561  !
2562  ! First improvement gives close-to-linear speedup on multicore systems.
2563  ! Second improvement gives constant speedup (2-3x depending on your CPU)
2564  !
2565  ! In order to use multicore features you have to:
2566  ! * use commercial version of ALGLIB
2567  ! * call this function with "smp_" prefix, which indicates that
2568  ! multicore code will be used (for multicore support)
2569  !
2570  ! In order to use SSE features you have to:
2571  ! * use commercial version of ALGLIB on Intel processors
2572  ! * use C++ computational core
2573  !
2574  ! This note is given for users of the commercial edition; if you use the
2575  ! GPL edition, you will still be able to call the smp-version of this
2576  ! function, but all computations will be done serially.
2577  !
2578  ! We recommend that you carefully read the ALGLIB Reference Manual section
2579  ! called 'SMP support' before using the parallel version of this function.
2580 
2581 
2582 INPUT PARAMETERS:
2583  Network - neural network;
2584  XY - training set, see below for information on the
2585  training set format;
2586  NPoints - points count.
2587 
2588 RESULT:
2589 Its meaning for the regression task is obvious. For the classification task,
2590 it is the average relative error made when estimating the posterior
2591 probability of belonging to the correct class.
2592 
2593 DATASET FORMAT:
2594 
2595 This function uses two different dataset formats - one for regression
2596 networks, another one for classification networks.
2597 
2598 For regression networks with NIn inputs and NOut outputs, the following
2599 dataset format is used:
2600 * dataset is given by NPoints*(NIn+NOut) matrix
2601 * each row corresponds to one example
2602 * first NIn columns are inputs, next NOut columns are outputs
2603 
2604 For classification networks with NIn inputs and NClasses classes, the
2605 following dataset format is used:
2606 * dataset is given by NPoints*(NIn+1) matrix
2607 * each row corresponds to one example
2608 * first NIn columns are inputs, last column stores class number (from 0 to
2609  NClasses-1).
2610 
2611  -- ALGLIB --
2612  Copyright 11.03.2008 by Bochkanov Sergey
2613 *************************************************************************/
2614 double mlpavgrelerror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints);
2615 double smp_mlpavgrelerror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints);
2616 
2617 
2618 /*************************************************************************
2619 Average relative error on the test set given by sparse matrix.
2620 
2621 
2622 FOR USERS OF COMMERCIAL EDITION:
2623 
2624  ! Commercial version of ALGLIB includes two important improvements of
2625  ! this function:
2626  ! * multicore support (C++ and C# computational cores)
2627  ! * SSE support
2628  !
2629  ! First improvement gives close-to-linear speedup on multicore systems.
2630  ! Second improvement gives constant speedup (2-3x depending on your CPU)
2631  !
2632  ! In order to use multicore features you have to:
2633  ! * use commercial version of ALGLIB
2634  ! * call this function with "smp_" prefix, which indicates that
2635  ! multicore code will be used (for multicore support)
2636  !
2637  ! In order to use SSE features you have to:
2638  ! * use commercial version of ALGLIB on Intel processors
2639  ! * use C++ computational core
2640  !
2641  ! This note is given for users of the commercial edition; if you use the
2642  ! GPL edition, you will still be able to call the smp-version of this
2643  ! function, but all computations will be done serially.
2644  !
2645  ! We recommend that you carefully read the ALGLIB Reference Manual section
2646  ! called 'SMP support' before using the parallel version of this function.
2647 
2648 
2649 INPUT PARAMETERS:
2650  Network - neural network;
2651  XY - training set, see below for information on the
2652  training set format. This function checks the dataset
2653  for correctness (no NANs/INFs, valid class numbers)
2654  and throws an exception when an incorrect dataset is
2655  passed. The sparse matrix must use CRS format for
2656  storage.
2657  NPoints - points count, >=0.
2658 
2659 RESULT:
2660 Its meaning for the regression task is obvious. For the classification task,
2661 it is the average relative error made when estimating the posterior
2662 probability of belonging to the correct class.
2663 
2664 DATASET FORMAT:
2665 
2666 This function uses two different dataset formats - one for regression
2667 networks, another one for classification networks.
2668 
2669 For regression networks with NIn inputs and NOut outputs, the following
2670 dataset format is used:
2671 * dataset is given by NPoints*(NIn+NOut) matrix
2672 * each row corresponds to one example
2673 * first NIn columns are inputs, next NOut columns are outputs
2674 
2675 For classification networks with NIn inputs and NClasses classes, the
2676 following dataset format is used:
2677 * dataset is given by NPoints*(NIn+1) matrix
2678 * each row corresponds to one example
2679 * first NIn columns are inputs, last column stores class number (from 0 to
2680  NClasses-1).
2681 
2682  -- ALGLIB --
2683  Copyright 09.08.2012 by Bochkanov Sergey
2684 *************************************************************************/
2685 double mlpavgrelerrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints);
2686 double smp_mlpavgrelerrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints);
2687 
2688 
2689 /*************************************************************************
2690 Gradient calculation
2691 
2692 INPUT PARAMETERS:
2693  Network - network initialized with one of the network creation funcs
2694  X - input vector, length of array must be at least NIn
2695  DesiredY- desired outputs, length of array must be at least NOut
2696  Grad - possibly preallocated array. If size of array is smaller
2697  than WCount, it will be reallocated. It is recommended to
2698  reuse previously allocated array to reduce allocation
2699  overhead.
2700 
2701 OUTPUT PARAMETERS:
2702  E - error function, SUM(sqr(y[i]-desiredy[i])/2,i)
2703  Grad - gradient of E with respect to weights of network, array[WCount]
2704 
2705  -- ALGLIB --
2706  Copyright 04.11.2007 by Bochkanov Sergey
2707 *************************************************************************/
2708 void mlpgrad(const multilayerperceptron &network, const real_1d_array &x, const real_1d_array &desiredy, double &e, real_1d_array &grad);
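/*************************************************************************
EXAMPLE (SKETCH):

A minimal single-sample gradient sketch for MLPGrad. The illustrative
NIn=2/NOut=1 regression network "net" from the earlier sketches is
assumed:

    real_1d_array x = "[0.1, 0.9]";  // input, length >= NIn
    real_1d_array t = "[1.0]";       // desired output, length >= NOut
    real_1d_array grad;              // reallocated to WCount if too small
    double e;
    mlpgrad(net, x, t, e, grad);     // e = SUM(sqr(y[i]-desiredy[i])/2,i)
*************************************************************************/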
2709 
2710 
2711 /*************************************************************************
2712 Gradient calculation (natural error function is used)
2713 
2714 INPUT PARAMETERS:
2715  Network - network initialized with one of the network creation funcs
2716  X - input vector, length of array must be at least NIn
2717  DesiredY- desired outputs, length of array must be at least NOut
2718  Grad - possibly preallocated array. If size of array is smaller
2719  than WCount, it will be reallocated. It is recommended to
2720  reuse previously allocated array to reduce allocation
2721  overhead.
2722 
2723 OUTPUT PARAMETERS:
2724  E - error function, sum-of-squares for regression networks,
2725  cross-entropy for classification networks.
2726  Grad - gradient of E with respect to weights of network, array[WCount]
2727 
2728  -- ALGLIB --
2729  Copyright 04.11.2007 by Bochkanov Sergey
2730 *************************************************************************/
2731 void mlpgradn(const multilayerperceptron &network, const real_1d_array &x, const real_1d_array &desiredy, double &e, real_1d_array &grad);
2732 
2733 
2734 /*************************************************************************
2735 Batch gradient calculation for a set of inputs/outputs
2736 
2737 
2738 FOR USERS OF COMMERCIAL EDITION:
2739 
2740  ! Commercial version of ALGLIB includes two important improvements of
2741  ! this function:
2742  ! * multicore support (C++ and C# computational cores)
2743  ! * SSE support
2744  !
2745  ! First improvement gives close-to-linear speedup on multicore systems.
2746  ! Second improvement gives constant speedup (2-3x depending on your CPU)
2747  !
2748  ! In order to use multicore features you have to:
2749  ! * use commercial version of ALGLIB
2750  ! * call this function with "smp_" prefix, which indicates that
2751  ! multicore code will be used (for multicore support)
2752  !
2753  ! In order to use SSE features you have to:
2754  ! * use commercial version of ALGLIB on Intel processors
2755  ! * use C++ computational core
2756  !
2757  ! This note is given for users of the commercial edition; if you use the
2758  ! GPL edition, you will still be able to call the smp-version of this
2759  ! function, but all computations will be done serially.
2760  !
2761  ! We recommend that you carefully read the ALGLIB Reference Manual section
2762  ! called 'SMP support' before using the parallel version of this function.
2763 
2764 
2765 INPUT PARAMETERS:
2766  Network - network initialized with one of the network creation funcs
2767  XY - original dataset in dense format; one sample = one row:
2768  * first NIn columns contain inputs,
2769  * for regression problem, next NOut columns store
2770  desired outputs.
2771  * for classification problem, next column (just one!)
2772  stores class number.
2773  SSize - number of elements in XY
2774  Grad - possibly preallocated array. If size of array is smaller
2775  than WCount, it will be reallocated. It is recommended to
2776  reuse previously allocated array to reduce allocation
2777  overhead.
2778 
2779 OUTPUT PARAMETERS:
2780  E - error function, SUM(sqr(y[i]-desiredy[i])/2,i)
2781  Grad - gradient of E with respect to weights of network, array[WCount]
2782 
2783  -- ALGLIB --
2784  Copyright 04.11.2007 by Bochkanov Sergey
2785 *************************************************************************/
2786 void mlpgradbatch(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize, double &e, real_1d_array &grad);
2787 void smp_mlpgradbatch(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize, double &e, real_1d_array &grad);
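/*************************************************************************
EXAMPLE (SKETCH):

A minimal batch-gradient sketch for MLPGradBatch, reusing the illustrative
network "net" and the dense regression dataset from the MLPError sketch
above:

    real_2d_array xy = "[[0,0,0],[0,1,1],[1,0,1],[1,1,0]]";
    real_1d_array grad;                // reuse across calls to cut allocations
    double e;
    mlpgradbatch(net, xy, 4, e, grad); // error and gradient over all 4 rows
*************************************************************************/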
2788 
2789 
2790 /*************************************************************************
2791 Batch gradient calculation for a set of inputs/outputs given by sparse
2792 matrices
2793 
2794 
2795 FOR USERS OF COMMERCIAL EDITION:
2796 
2797  ! Commercial version of ALGLIB includes two important improvements of
2798  ! this function:
2799  ! * multicore support (C++ and C# computational cores)
2800  ! * SSE support
2801  !
2802  ! First improvement gives close-to-linear speedup on multicore systems.
2803  ! Second improvement gives constant speedup (2-3x depending on your CPU)
2804  !
2805  ! In order to use multicore features you have to:
2806  ! * use commercial version of ALGLIB
2807  ! * call this function with "smp_" prefix, which indicates that
2808  ! multicore code will be used (for multicore support)
2809  !
2810  ! In order to use SSE features you have to:
2811  ! * use commercial version of ALGLIB on Intel processors
2812  ! * use C++ computational core
2813  !
2814  ! This note is given for users of the commercial edition; if you use the
2815  ! GPL edition, you will still be able to call the smp-version of this
2816  ! function, but all computations will be done serially.
2817  !
2818  ! We recommend that you carefully read the ALGLIB Reference Manual section
2819  ! called 'SMP support' before using the parallel version of this function.
2820 
2821 
2822 INPUT PARAMETERS:
2823  Network - network initialized with one of the network creation funcs
2824  XY - original dataset in sparse format; one sample = one row:
2825  * MATRIX MUST BE STORED IN CRS FORMAT
2826  * first NIn columns contain inputs.
2827  * for regression problem, next NOut columns store
2828  desired outputs.
2829  * for classification problem, next column (just one!)
2830  stores class number.
2831  SSize - number of elements in XY
2832  Grad - possibly preallocated array. If size of array is smaller
2833  than WCount, it will be reallocated. It is recommended to
2834  reuse previously allocated array to reduce allocation
2835  overhead.
2836 
2837 OUTPUT PARAMETERS:
2838  E - error function, SUM(sqr(y[i]-desiredy[i])/2,i)
2839  Grad - gradient of E with respect to weights of network, array[WCount]
2840 
2841  -- ALGLIB --
2842  Copyright 26.07.2012 by Bochkanov Sergey
2843 *************************************************************************/
2844 void mlpgradbatchsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t ssize, double &e, real_1d_array &grad);
2845 void smp_mlpgradbatchsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t ssize, double &e, real_1d_array &grad);
2846 
2847 
2848 /*************************************************************************
2849 Batch gradient calculation for a subset of dataset
2850 
2851 
2852 FOR USERS OF COMMERCIAL EDITION:
2853 
2854  ! Commercial version of ALGLIB includes two important improvements of
2855  ! this function:
2856  ! * multicore support (C++ and C# computational cores)
2857  ! * SSE support
2858  !
2859  ! First improvement gives close-to-linear speedup on multicore systems.
2860  ! Second improvement gives constant speedup (2-3x depending on your CPU)
2861  !
2862  ! In order to use multicore features you have to:
2863  ! * use commercial version of ALGLIB
2864  ! * call this function with "smp_" prefix, which indicates that
2865  ! multicore code will be used (for multicore support)
2866  !
2867  ! In order to use SSE features you have to:
2868  ! * use commercial version of ALGLIB on Intel processors
2869  ! * use C++ computational core
2870  !
2871  ! This note is given for users of the commercial edition; if you use the
2872  ! GPL edition, you will still be able to call the smp-version of this
2873  ! function, but all computations will be done serially.
2874  !
2875  ! We recommend that you carefully read the ALGLIB Reference Manual section
2876  ! called 'SMP support' before using the parallel version of this function.
2877 
2878 
2879 INPUT PARAMETERS:
2880  Network - network initialized with one of the network creation funcs
2881  XY - original dataset in dense format; one sample = one row:
2882  * first NIn columns contain inputs,
2883  * for regression problem, next NOut columns store
2884  desired outputs.
2885  * for classification problem, next column (just one!)
2886  stores class number.
2887  SetSize - real size of XY, SetSize>=0;
2888  Idx - subset of SubsetSize elements, array[SubsetSize]:
2889  * Idx[I] stores row index in the original dataset which is
2890  given by XY. Gradient is calculated with respect to rows
2891  whose indexes are stored in Idx[].
2892  * Idx[] must store correct indexes; this function throws
2893  an exception if an incorrect index (less than 0 or
2894  larger than rows(XY)) is given
2895  * Idx[] may store indexes in any order and even with
2896  repetitions.
2897  SubsetSize- number of elements in Idx[] array:
2898  * positive value means that subset given by Idx[] is processed
2899  * zero value results in zero gradient
2900  * negative value means that full dataset is processed
2901  Grad - possibly preallocated array. If size of array is smaller
2902  than WCount, it will be reallocated. It is recommended to
2903  reuse previously allocated array to reduce allocation
2904  overhead.
2905 
2906 OUTPUT PARAMETERS:
2907  E - error function, SUM(sqr(y[i]-desiredy[i])/2,i)
2908  Grad - gradient of E with respect to weights of network,
2909  array[WCount]
2910 
2911  -- ALGLIB --
2912  Copyright 26.07.2012 by Bochkanov Sergey
2913 *************************************************************************/
2914 void mlpgradbatchsubset(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t setsize, const integer_1d_array &idx, const ae_int_t subsetsize, double &e, real_1d_array &grad);
2915 void smp_mlpgradbatchsubset(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t setsize, const integer_1d_array &idx, const ae_int_t subsetsize, double &e, real_1d_array &grad);
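/*************************************************************************
EXAMPLE (SKETCH):

A minimal sketch of MLPGradBatchSubset on the same illustrative "net" and
"xy" as the MLPGradBatch sketch above; Idx[] may repeat rows and list them
in any order:

    integer_1d_array idx = "[0, 2, 2]"; // rows 0, 2 and 2 again
    real_1d_array grad;
    double e;
    mlpgradbatchsubset(net, xy, 4, idx, 3, e, grad); // SetSize=4, SubsetSize=3
*************************************************************************/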
2916 
2917 
2918 /*************************************************************************
2919 Batch gradient calculation for a set of inputs/outputs for a subset of
2920 dataset given by set of indexes.
2921 
2922 
2923 FOR USERS OF COMMERCIAL EDITION:
2924 
2925  ! Commercial version of ALGLIB includes two important improvements of
2926  ! this function:
2927  ! * multicore support (C++ and C# computational cores)
2928  ! * SSE support
2929  !
2930  ! First improvement gives close-to-linear speedup on multicore systems.
2931  ! Second improvement gives constant speedup (2-3x depending on your CPU)
2932  !
2933  ! In order to use multicore features you have to:
2934  ! * use commercial version of ALGLIB
2935  ! * call this function with "smp_" prefix, which indicates that
2936  ! multicore code will be used (for multicore support)
2937  !
2938  ! In order to use SSE features you have to:
2939  ! * use commercial version of ALGLIB on Intel processors
2940  ! * use C++ computational core
2941  !
2942  ! This note is given for users of the commercial edition; if you use the
2943  ! GPL edition, you will still be able to call the smp-version of this
2944  ! function, but all computations will be done serially.
2945  !
2946  ! We recommend that you carefully read the ALGLIB Reference Manual section
2947  ! called 'SMP support' before using the parallel version of this function.
2948 
2949 
2950 INPUT PARAMETERS:
2951  Network - network initialized with one of the network creation funcs
2952  XY - original dataset in sparse format; one sample = one row:
2953  * MATRIX MUST BE STORED IN CRS FORMAT
2954  * first NIn columns contain inputs,
2955  * for regression problem, next NOut columns store
2956  desired outputs.
2957  * for classification problem, next column (just one!)
2958  stores class number.
2959  SetSize - real size of XY, SetSize>=0;
2960  Idx - subset of SubsetSize elements, array[SubsetSize]:
2961  * Idx[I] stores row index in the original dataset which is
2962  given by XY. Gradient is calculated with respect to rows
2963  whose indexes are stored in Idx[].
2964  * Idx[] must store correct indexes; this function throws
2965  an exception if an incorrect index (less than 0 or
2966  larger than rows(XY)) is given
2967  * Idx[] may store indexes in any order and even with
2968  repetitions.
2969  SubsetSize- number of elements in Idx[] array:
2970  * positive value means that subset given by Idx[] is processed
2971  * zero value results in zero gradient
2972  * negative value means that full dataset is processed
2973  Grad - possibly preallocated array. If size of array is smaller
2974  than WCount, it will be reallocated. It is recommended to
2975  reuse previously allocated array to reduce allocation
2976  overhead.
2977 
2978 OUTPUT PARAMETERS:
2979  E - error function, SUM(sqr(y[i]-desiredy[i])/2,i)
2980  Grad - gradient of E with respect to weights of network,
2981  array[WCount]
2982 
2983 NOTE: when SubsetSize<0 is used, the full dataset is processed via a call
2984  to the MLPGradBatchSparse function.
2985 
2986  -- ALGLIB --
2987  Copyright 26.07.2012 by Bochkanov Sergey
2988 *************************************************************************/
2989 void mlpgradbatchsparsesubset(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t setsize, const integer_1d_array &idx, const ae_int_t subsetsize, double &e, real_1d_array &grad);
2990 void smp_mlpgradbatchsparsesubset(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t setsize, const integer_1d_array &idx, const ae_int_t subsetsize, double &e, real_1d_array &grad);
2991 
2992 
2993 /*************************************************************************
2994 Batch gradient calculation for a set of inputs/outputs
2995 (natural error function is used)
2996 
2997 INPUT PARAMETERS:
2998  Network - network initialized with one of the network creation funcs
2999  XY - set of inputs/outputs; one sample = one row;
3000  first NIn columns contain inputs,
3001  next NOut columns - desired outputs.
3002  SSize - number of elements in XY
3003  Grad - possibly preallocated array. If size of array is smaller
3004  than WCount, it will be reallocated. It is recommended to
3005  reuse previously allocated array to reduce allocation
3006  overhead.
3007 
3008 OUTPUT PARAMETERS:
3009  E - error function, sum-of-squares for regression networks,
3010  cross-entropy for classification networks.
3011  Grad - gradient of E with respect to weights of network, array[WCount]
3012 
3013  -- ALGLIB --
3014  Copyright 04.11.2007 by Bochkanov Sergey
3015 *************************************************************************/
3016 void mlpgradnbatch(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize, double &e, real_1d_array &grad);
3017 
3018 
3019 /*************************************************************************
3020 Batch Hessian calculation (natural error function) using R-algorithm.
3021 Internal subroutine.
3022 
3023  -- ALGLIB --
3024  Copyright 26.01.2008 by Bochkanov Sergey.
3025 
3026  Hessian calculation based on R-algorithm described in
3027  "Fast Exact Multiplication by the Hessian",
3028  B. A. Pearlmutter,
3029  Neural Computation, 1994.
3030 *************************************************************************/
3031 void mlphessiannbatch(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize, double &e, real_1d_array &grad, real_2d_array &h);
3032 
3033 
3034 /*************************************************************************
3035 Batch Hessian calculation using R-algorithm.
3036 Internal subroutine.
3037 
3038  -- ALGLIB --
3039  Copyright 26.01.2008 by Bochkanov Sergey.
3040 
3041  Hessian calculation based on R-algorithm described in
3042  "Fast Exact Multiplication by the Hessian",
3043  B. A. Pearlmutter,
3044  Neural Computation, 1994.
3045 *************************************************************************/
3046 void mlphessianbatch(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize, double &e, real_1d_array &grad, real_2d_array &h);
3047 
3048 
3049 /*************************************************************************
3050 Calculation of all types of errors on a subset of the dataset.
3051 
3052 FOR USERS OF COMMERCIAL EDITION:
3053 
3054  ! Commercial version of ALGLIB includes two important improvements of
3055  ! this function:
3056  ! * multicore support (C++ and C# computational cores)
3057  ! * SSE support
3058  !
3059  ! First improvement gives close-to-linear speedup on multicore systems.
3060  ! Second improvement gives constant speedup (2-3x depending on your CPU)
3061  !
3062  ! In order to use multicore features you have to:
3063  ! * use commercial version of ALGLIB
3064  ! * call this function with "smp_" prefix, which indicates that
3065  ! multicore code will be used (for multicore support)
3066  !
3067  ! In order to use SSE features you have to:
3068  ! * use commercial version of ALGLIB on Intel processors
3069  ! * use C++ computational core
3070  !
3071  ! This note is given for users of the commercial edition; if you use the
3072  ! GPL edition, you will still be able to call the smp-version of this
3073  ! function, but all computations will be done serially.
3074  !
3075  ! We recommend that you carefully read the ALGLIB Reference Manual section
3076  ! called 'SMP support' before using the parallel version of this function.
3077 
3078 
3079 INPUT PARAMETERS:
3080  Network - network initialized with one of the network creation funcs
3081  XY - original dataset; one sample = one row;
3082  first NIn columns contain inputs,
3083  next NOut columns - desired outputs.
3084  SetSize - real size of XY, SetSize>=0;
3085  Subset - subset of SubsetSize elements, array[SubsetSize];
3086  SubsetSize- number of elements in Subset[] array:
3087  * if SubsetSize>0, rows of XY with indices Subset[0]...
3088  ...Subset[SubsetSize-1] are processed
3089  * if SubsetSize=0, zeros are returned
3090  * if SubsetSize<0, entire dataset is processed; Subset[]
3091  array is ignored in this case.
3092 
3093 OUTPUT PARAMETERS:
3094  Rep - contains all types of errors.
3095 
3096  -- ALGLIB --
3097  Copyright 04.09.2012 by Bochkanov Sergey
3098 *************************************************************************/
3099 void mlpallerrorssubset(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize, modelerrors &rep);
3100 void smp_mlpallerrorssubset(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize, modelerrors &rep);
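/*************************************************************************
EXAMPLE (SKETCH):

A minimal sketch of MLPAllErrorsSubset on the full illustrative dataset
from the sketches above; passing SubsetSize<0 processes every row and
ignores Subset[]:

    integer_1d_array subset = "[0]";  // ignored because SubsetSize<0 below
    modelerrors rep;
    mlpallerrorssubset(net, xy, 4, subset, -1, rep);
    // rep.rmserror, rep.avgerror, rep.avgrelerror, rep.avgce, rep.relclserror
*************************************************************************/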
3101 
3102 
3103 /*************************************************************************
3104 Calculation of all types of errors on a subset of the dataset.
3105 
3106 FOR USERS OF COMMERCIAL EDITION:
3107 
3108  ! Commercial version of ALGLIB includes two important improvements of
3109  ! this function:
3110  ! * multicore support (C++ and C# computational cores)
3111  ! * SSE support
3112  !
3113  ! First improvement gives close-to-linear speedup on multicore systems.
3114  ! Second improvement gives constant speedup (2-3x depending on your CPU)
3115  !
3116  ! In order to use multicore features you have to:
3117  ! * use commercial version of ALGLIB
3118  ! * call this function with "smp_" prefix, which indicates that
3119  ! multicore code will be used (for multicore support)
3120  !
3121  ! In order to use SSE features you have to:
3122  ! * use commercial version of ALGLIB on Intel processors
3123  ! * use C++ computational core
3124  !
3125  ! This note is given for users of the commercial edition; if you use the
3126  ! GPL edition, you will still be able to call the smp-version of this
3127  ! function, but all computations will be done serially.
3128  !
3129  ! We recommend that you carefully read the ALGLIB Reference Manual section
3130  ! called 'SMP support' before using the parallel version of this function.
3131 
3132 
3133 INPUT PARAMETERS:
3134  Network - network initialized with one of the network creation funcs
3135  XY - original dataset given by sparse matrix;
3136  one sample = one row;
3137  first NIn columns contain inputs,
3138  next NOut columns - desired outputs.
3139  SetSize - real size of XY, SetSize>=0;
3140  Subset - subset of SubsetSize elements, array[SubsetSize];
3141  SubsetSize- number of elements in Subset[] array:
3142  * if SubsetSize>0, rows of XY with indices Subset[0]...
3143  ...Subset[SubsetSize-1] are processed
3144  * if SubsetSize=0, zeros are returned
3145  * if SubsetSize<0, entire dataset is processed; Subset[]
3146  array is ignored in this case.
3147 
3148 OUTPUT PARAMETERS:
3149  Rep - contains all types of errors.
3150 
3151 
3152  -- ALGLIB --
3153  Copyright 04.09.2012 by Bochkanov Sergey
3154 *************************************************************************/
3155 void mlpallerrorssparsesubset(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize, modelerrors &rep);
3156 void smp_mlpallerrorssparsesubset(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize, modelerrors &rep);
3157 
3158 
3159 /*************************************************************************
3160 Error of the neural network on subset of dataset.
3161 
3162 
3163 FOR USERS OF COMMERCIAL EDITION:
3164 
3165  ! Commercial version of ALGLIB includes two important improvements of
3166  ! this function:
3167  ! * multicore support (C++ and C# computational cores)
3168  ! * SSE support
3169  !
3170  ! First improvement gives close-to-linear speedup on multicore systems.
3171  ! Second improvement gives constant speedup (2-3x depending on your CPU)
3172  !
3173  ! In order to use multicore features you have to:
3174  ! * use commercial version of ALGLIB
3175  ! * call this function with "smp_" prefix, which indicates that
3176  ! multicore code will be used (for multicore support)
3177  !
3178  ! In order to use SSE features you have to:
3179  ! * use commercial version of ALGLIB on Intel processors
3180  ! * use C++ computational core
3181  !
3182  ! This note is given for users of the commercial edition; if you use the
3183  ! GPL edition, you will still be able to call the smp-version of this
3184  ! function, but all computations will be done serially.
3185  !
3186  ! We recommend that you carefully read the ALGLIB Reference Manual section
3187  ! called 'SMP support' before using the parallel version of this function.
3188 
3189 
3190 INPUT PARAMETERS:
3191  Network - neural network;
3192  XY - training set, see below for information on the
3193  training set format;
3194  SetSize - real size of XY, SetSize>=0;
3195  Subset - subset of SubsetSize elements, array[SubsetSize];
3196  SubsetSize- number of elements in Subset[] array:
3197  * if SubsetSize>0, rows of XY with indices Subset[0]...
3198  ...Subset[SubsetSize-1] are processed
3199  * if SubsetSize=0, zeros are returned
3200  * if SubsetSize<0, entire dataset is processed; Subset[]
3201  array is ignored in this case.
3202 
3203 RESULT:
3204  sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2)
3205 
3206 DATASET FORMAT:
3207 
3208 This function uses two different dataset formats - one for regression
3209 networks, another one for classification networks.
3210 
3211 For regression networks with NIn inputs and NOut outputs, the following
3212 dataset format is used:
3213 * dataset is given by NPoints*(NIn+NOut) matrix
3214 * each row corresponds to one example
3215 * first NIn columns are inputs, next NOut columns are outputs
3216 
3217 For classification networks with NIn inputs and NClasses classes, the
3218 following dataset format is used:
3219 * dataset is given by NPoints*(NIn+1) matrix
3220 * each row corresponds to one example
3221 * first NIn columns are inputs, last column stores class number (from 0 to
3222  NClasses-1).
3223 
3224  -- ALGLIB --
3225  Copyright 04.09.2012 by Bochkanov Sergey
3226 *************************************************************************/
3227 double mlperrorsubset(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize);
3228 double smp_mlperrorsubset(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize);
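A short sketch of the dense-subset call on a classification dataset (mlpcreatec1() is assumed from the standard ALGLIB C++ API; per the format above, the last column stores the class number):

    #include <cstdio>
    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        multilayerperceptron net;
        mlpcreatec1(2, 5, 2, net);            // classifier: 2 inputs, 2 classes
        real_2d_array xy = "[[0,0,0],[0,1,1],[1,0,1],[1,1,0]]"; // last col = class
        integer_1d_array subset = "[0,2]";    // evaluate on rows 0 and 2 only
        double e = mlperrorsubset(net, xy, 4, subset, 2);
        printf("%.6f\n", e);                  // sum-of-squares error (see above)
        return 0;
    }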
3229 
3230 
3231 /*************************************************************************
3232 Error of the neural network on subset of sparse dataset.
3233 
3234 
3235 FOR USERS OF COMMERCIAL EDITION:
3236 
3237  ! Commercial version of ALGLIB includes two important improvements of
3238  ! this function:
3239  ! * multicore support (C++ and C# computational cores)
3240  ! * SSE support
3241  !
3242  ! First improvement gives close-to-linear speedup on multicore systems.
3243  ! Second improvement gives constant speedup (2-3x depending on your CPU)
3244  !
3245  ! In order to use multicore features you have to:
3246  ! * use commercial version of ALGLIB
3247  ! * call this function with "smp_" prefix, which indicates that
3248  ! multicore code will be used (for multicore support)
3249  !
3250  ! In order to use SSE features you have to:
3251  ! * use commercial version of ALGLIB on Intel processors
3252  ! * use C++ computational core
3253  !
3254  ! This note is given for users of the commercial edition; if you use the
3255  ! GPL edition, you will still be able to call the smp-version of this
3256  ! function, but all computations will be done serially.
3257  !
3258  ! We recommend that you carefully read the ALGLIB Reference Manual section
3259  ! called 'SMP support' before using the parallel version of this function.
3260 
3261 
3262 INPUT PARAMETERS:
3263  Network - neural network;
3264  XY - training set, see below for information on the
3265  training set format. This function checks correctness
3266  of the dataset (no NANs/INFs, class numbers are
3267  correct) and throws exception when incorrect dataset
3268  is passed. Sparse matrix must use CRS format for
3269  storage.
3270  SetSize - real size of XY, SetSize>=0;
3271  it is used when SubsetSize<0;
3272  Subset - subset of SubsetSize elements, array[SubsetSize];
3273  SubsetSize- number of elements in Subset[] array:
3274  * if SubsetSize>0, rows of XY with indices Subset[0]...
3275  ...Subset[SubsetSize-1] are processed
3276  * if SubsetSize=0, zeros are returned
3277  * if SubsetSize<0, entire dataset is processed; Subset[]
3278  array is ignored in this case.
3279 
3280 RESULT:
3281  sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2)
3282 
3283 DATASET FORMAT:
3284 
3285 This function uses two different dataset formats - one for regression
3286 networks, another one for classification networks.
3287 
3288 For regression networks with NIn inputs and NOut outputs, the following
3289 dataset format is used:
3290 * dataset is given by NPoints*(NIn+NOut) matrix
3291 * each row corresponds to one example
3292 * first NIn columns are inputs, next NOut columns are outputs
3293 
3294 For classification networks with NIn inputs and NClasses classes, the
3295 following dataset format is used:
3296 * dataset is given by NPoints*(NIn+1) matrix
3297 * each row corresponds to one example
3298 * first NIn columns are inputs, last column stores class number (from 0 to
3299  NClasses-1).
3300 
3301  -- ALGLIB --
3302  Copyright 04.09.2012 by Bochkanov Sergey
3303 *************************************************************************/
3304 double mlperrorsparsesubset(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize);
3305 double smp_mlperrorsparsesubset(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize);
3306 
3307 /*************************************************************************
3308 Multiclass Fisher LDA
3309 
3310 The subroutine finds coefficients of a linear combination which optimally
3311 separates the training set into classes.
3312 
3313 COMMERCIAL EDITION OF ALGLIB:
3314 
3315  ! Commercial version of ALGLIB includes two important improvements of
3316  ! this function, which can be used from C++ and C#:
3317  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
3318  ! * multithreading support
3319  !
3320  ! Intel MKL gives approximately constant (with respect to number of
3321  ! worker threads) acceleration factor which depends on CPU being used,
3322  ! problem size and "baseline" ALGLIB edition which is used for
3323  ! comparison. Best results are achieved for high-dimensional problems
3324  ! (NVars is at least 256).
3325  !
3326  ! Multithreading is used to accelerate initial phase of LDA, which
3327  ! includes calculation of products of large matrices. Again, for best
3328  ! efficiency problem must be high-dimensional.
3329  !
3330  ! Generally, commercial ALGLIB is several times faster than open-source
3331  ! generic C edition, and many times faster than open-source C# edition.
3332  !
3333  ! We recommend that you read the 'Working with commercial version' section
3334  ! of the ALGLIB Reference Manual in order to find out how to use the
3335  ! performance-related features provided by the commercial edition of ALGLIB.
3336 
3337 INPUT PARAMETERS:
3338  XY - training set, array[0..NPoints-1,0..NVars].
3339  First NVars columns store values of independent
3340  variables, next column stores number of class (from 0
3341  to NClasses-1) which dataset element belongs to. Fractional
3342  values are rounded to nearest integer.
3343  NPoints - training set size, NPoints>=0
3344  NVars - number of independent variables, NVars>=1
3345  NClasses - number of classes, NClasses>=2
3346 
3347 
3348 OUTPUT PARAMETERS:
3349  Info - return code:
3350  * -4, if internal EVD subroutine hasn't converged
3351  * -2, if there is a point with class number
3352  outside of [0..NClasses-1].
3353  * -1, if incorrect parameters were passed (NPoints<0,
3354  NVars<1, NClasses<2)
3355  * 1, if task has been solved
3356  * 2, if there was a multicollinearity in training set,
3357  but task has been solved.
3358  W - linear combination coefficients, array[0..NVars-1]
3359 
3360  -- ALGLIB --
3361  Copyright 31.05.2008 by Bochkanov Sergey
3362 *************************************************************************/
3363 void fisherlda(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nclasses, ae_int_t &info, real_1d_array &w);
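A minimal usage sketch (standard ALGLIB C++ wrapper assumed; the dataset and dimensions are illustrative):

    #include <cstdio>
    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        // Two variables, two classes; last column is the class number.
        real_2d_array xy = "[[1.0,0.1,0],[1.2,0.2,0],[0.1,1.0,1],[0.2,1.1,1]]";
        ae_int_t info;
        real_1d_array w;
        fisherlda(xy, 4, 2, 2, info, w);
        if( info==1 )
            printf("w = [%f, %f]\n", w[0], w[1]); // separating direction
        return 0;
    }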
3364 
3365 
3366 /*************************************************************************
3367 N-dimensional multiclass Fisher LDA
3368 
3369 The subroutine finds coefficients of linear combinations which optimally
3370 separate the training set into classes. It returns an N-dimensional basis
3371 whose vectors are sorted by quality of training set separation (descending).
3372 
3373 COMMERCIAL EDITION OF ALGLIB:
3374 
3375  ! Commercial version of ALGLIB includes two important improvements of
3376  ! this function, which can be used from C++ and C#:
3377  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
3378  ! * multithreading support
3379  !
3380  ! Intel MKL gives approximately constant (with respect to number of
3381  ! worker threads) acceleration factor which depends on CPU being used,
3382  ! problem size and "baseline" ALGLIB edition which is used for
3383  ! comparison. Best results are achieved for high-dimensional problems
3384  ! (NVars is at least 256).
3385  !
3386  ! Multithreading is used to accelerate initial phase of LDA, which
3387  ! includes calculation of products of large matrices. Again, for best
3388  ! efficiency problem must be high-dimensional.
3389  !
3390  ! Generally, commercial ALGLIB is several times faster than open-source
3391  ! generic C edition, and many times faster than open-source C# edition.
3392  !
3393  ! We recommend that you read the 'Working with commercial version' section
3394  ! of the ALGLIB Reference Manual in order to find out how to use the
3395  ! performance-related features provided by the commercial edition of ALGLIB.
3396 
3397 INPUT PARAMETERS:
3398  XY - training set, array[0..NPoints-1,0..NVars].
3399  First NVars columns store values of independent
3400  variables, next column stores number of class (from 0
3401  to NClasses-1) which dataset element belongs to. Fractional
3402  values are rounded to nearest integer.
3403  NPoints - training set size, NPoints>=0
3404  NVars - number of independent variables, NVars>=1
3405  NClasses - number of classes, NClasses>=2
3406 
3407 
3408 OUTPUT PARAMETERS:
3409  Info - return code:
3410  * -4, if internal EVD subroutine hasn't converged
3411  * -2, if there is a point with class number
3412  outside of [0..NClasses-1].
3413  * -1, if incorrect parameters were passed (NPoints<0,
3414  NVars<1, NClasses<2)
3415  * 1, if task has been solved
3416  * 2, if there was a multicollinearity in training set,
3417  but task has been solved.
3418  W - basis, array[0..NVars-1,0..NVars-1]
3419  columns of the matrix store basis vectors, sorted by
3420  quality of training set separation (in descending order)
3421 
3422  -- ALGLIB --
3423  Copyright 31.05.2008 by Bochkanov Sergey
3424 *************************************************************************/
3425 void fisherldan(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nclasses, ae_int_t &info, real_2d_array &w);
3426 void smp_fisherldan(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nclasses, ae_int_t &info, real_2d_array &w);
3427 
3428 /*************************************************************************
3429 Linear regression
3430 
3431 Subroutine builds model:
3432 
3433  Y = A(0)*X[0] + ... + A(N-1)*X[N-1] + A(N)
3434 
3435 and returns the model in ALGLIB format, along with the covariance matrix,
3436 training set errors (rms, average, average relative) and a leave-one-out
3437 cross-validation estimate of the generalization error. The CV estimate is
3438 calculated by a fast algorithm with O(NPoints*NVars) complexity.
3439 
3440 When the covariance matrix is calculated, standard deviations of function
3441 values are assumed to be equal to the RMS error on the training set.
3442 
3443 INPUT PARAMETERS:
3444  XY - training set, array [0..NPoints-1,0..NVars]:
3445  * NVars columns - independent variables
3446  * last column - dependent variable
3447  NPoints - training set size, NPoints>NVars+1
3448  NVars - number of independent variables
3449 
3450 OUTPUT PARAMETERS:
3451  Info - return code:
3452  * -255, in case of unknown internal error
3453  * -4, if the internal SVD subroutine hasn't converged
3454  * -1, if incorrect parameters were passed (NPoints<NVars+2, NVars<1).
3455  * 1, if subroutine successfully finished
3456  LM - linear model in the ALGLIB format. Use subroutines of
3457  this unit to work with the model.
3458  AR - additional results
3459 
3460 
3461  -- ALGLIB --
3462  Copyright 02.08.2008 by Bochkanov Sergey
3463 *************************************************************************/
3464 void lrbuild(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, ae_int_t &info, linearmodel &lm, lrreport &ar);
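A minimal usage sketch (standard ALGLIB C++ wrapper assumed; lrprocess() is declared below in this header):

    #include <cstdio>
    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        // One independent variable, last column is the dependent one;
        // NPoints=5 satisfies NPoints>NVars+1.
        real_2d_array xy = "[[1,2.1],[2,3.9],[3,6.2],[4,7.8],[5,10.1]]";
        ae_int_t info;
        linearmodel lm;
        lrreport ar;
        lrbuild(xy, 5, 1, info, lm, ar);
        if( info==1 )
        {
            real_1d_array x = "[6]";
            printf("y(6) ~ %f\n", lrprocess(lm, x)); // roughly 12, y ~ 2x
        }
        return 0;
    }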
3465 
3466 
3467 /*************************************************************************
3468 Linear regression
3469 
3470 Variant of LRBuild which uses a vector of standard deviations (errors in
3471 function values).
3472 
3473 INPUT PARAMETERS:
3474  XY - training set, array [0..NPoints-1,0..NVars]:
3475  * NVars columns - independent variables
3476  * last column - dependent variable
3477  S - standard deviations (errors in function values)
3478  array[0..NPoints-1], S[i]>0.
3479  NPoints - training set size, NPoints>NVars+1
3480  NVars - number of independent variables
3481 
3482 OUTPUT PARAMETERS:
3483  Info - return code:
3484  * -255, in case of unknown internal error
3485  * -4, if the internal SVD subroutine hasn't converged
3486  * -1, if incorrect parameters were passed (NPoints<NVars+2, NVars<1).
3487  * -2, if S[I]<=0
3488  * 1, if subroutine successfully finished
3489  LM - linear model in the ALGLIB format. Use subroutines of
3490  this unit to work with the model.
3491  AR - additional results
3492 
3493 
3494  -- ALGLIB --
3495  Copyright 02.08.2008 by Bochkanov Sergey
3496 *************************************************************************/
3497 void lrbuilds(const real_2d_array &xy, const real_1d_array &s, const ae_int_t npoints, const ae_int_t nvars, ae_int_t &info, linearmodel &lm, lrreport &ar);
3498 
3499 
3500 /*************************************************************************
3501 Like LRBuildS, but builds model
3502 
3503  Y = A(0)*X[0] + ... + A(N-1)*X[N-1]
3504 
3505 i.e. with zero constant term.
3506 
3507  -- ALGLIB --
3508  Copyright 30.10.2008 by Bochkanov Sergey
3509 *************************************************************************/
3510 void lrbuildzs(const real_2d_array &xy, const real_1d_array &s, const ae_int_t npoints, const ae_int_t nvars, ae_int_t &info, linearmodel &lm, lrreport &ar);
3511 
3512 
3513 /*************************************************************************
3514 Like LRBuild but builds model
3515 
3516  Y = A(0)*X[0] + ... + A(N-1)*X[N-1]
3517 
3518 i.e. with zero constant term.
3519 
3520  -- ALGLIB --
3521  Copyright 30.10.2008 by Bochkanov Sergey
3522 *************************************************************************/
3523 void lrbuildz(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, ae_int_t &info, linearmodel &lm, lrreport &ar);
3524 
3525 
3526 /*************************************************************************
3527 Unpacks coefficients of linear model.
3528 
3529 INPUT PARAMETERS:
3530  LM - linear model in ALGLIB format
3531 
3532 OUTPUT PARAMETERS:
3533  V - coefficients, array[0..NVars]
3534  the constant term (intercept) is stored in V[NVars].
3535  NVars - number of independent variables (one less than number
3536  of coefficients)
3537 
3538  -- ALGLIB --
3539  Copyright 30.08.2008 by Bochkanov Sergey
3540 *************************************************************************/
3541 void lrunpack(const linearmodel &lm, real_1d_array &v, ae_int_t &nvars);
3542 
3543 
3544 /*************************************************************************
3545 "Packs" coefficients and creates linear model in ALGLIB format (LRUnpack
3546 reversed).
3547 
3548 INPUT PARAMETERS:
3549  V - coefficients, array[0..NVars]
3550  NVars - number of independent variables
3551 
3552 OUTPUT PARAMETERS:
3553  LM - linear model.
3554 
3555  -- ALGLIB --
3556  Copyright 30.08.2008 by Bochkanov Sergey
3557 *************************************************************************/
3558 void lrpack(const real_1d_array &v, const ae_int_t nvars, linearmodel &lm);
3559 
3560 
3561 /*************************************************************************
3562 Processing
3563 
3564 INPUT PARAMETERS:
3565  LM - linear model
3566  X - input vector, array[0..NVars-1].
3567 
3568 RESULT:
3569  value of linear model regression estimate
3570 
3571  -- ALGLIB --
3572  Copyright 03.09.2008 by Bochkanov Sergey
3573 *************************************************************************/
3574 double lrprocess(const linearmodel &lm, const real_1d_array &x);
3575 
3576 
3577 /*************************************************************************
3578 RMS error on the test set
3579 
3580 INPUT PARAMETERS:
3581  LM - linear model
3582  XY - test set
3583  NPoints - test set size
3584 
3585 RESULT:
3586  root mean square error.
3587 
3588  -- ALGLIB --
3589  Copyright 30.08.2008 by Bochkanov Sergey
3590 *************************************************************************/
3591 double lrrmserror(const linearmodel &lm, const real_2d_array &xy, const ae_int_t npoints);
3592 
3593 
3594 /*************************************************************************
3595 Average error on the test set
3596 
3597 INPUT PARAMETERS:
3598  LM - linear model
3599  XY - test set
3600  NPoints - test set size
3601 
3602 RESULT:
3603  average error.
3604 
3605  -- ALGLIB --
3606  Copyright 30.08.2008 by Bochkanov Sergey
3607 *************************************************************************/
3608 double lravgerror(const linearmodel &lm, const real_2d_array &xy, const ae_int_t npoints);
3609 
3610 
3611 /*************************************************************************
3612 Average relative error on the test set
3613 
3614 INPUT PARAMETERS:
3615  LM - linear model
3616  XY - test set
3617  NPoints - test set size
3618 
3619 RESULT:
3620  average relative error.
3621 
3622  -- ALGLIB --
3623  Copyright 30.08.2008 by Bochkanov Sergey
3624 *************************************************************************/
3625 double lravgrelerror(const linearmodel &lm, const real_2d_array &xy, const ae_int_t npoints);
3626 
3627 /*************************************************************************
3628 Filters: simple moving averages (unsymmetric).
3629 
3630 This filter replaces the array by the results of an SMA(K) filter. SMA(K)
3631 is defined as a filter which averages at most K previous points (previous -
3632 not points AROUND the central point), or fewer in the case of the first K-1 points.
3633 
3634 INPUT PARAMETERS:
3635  X - array[N], array to process. It can be larger than N;
3636  in this case only the first N points are processed.
3637  N - points count, N>=0
3638  K - K>=1 (K can be larger than N , such cases will be
3639  correctly handled). Window width. K=1 corresponds to
3640  identity transformation (nothing changes).
3641 
3642 OUTPUT PARAMETERS:
3643  X - array, whose first N elements were processed with SMA(K)
3644 
3645 NOTE 1: this function uses an efficient in-place algorithm which does not
3646  allocate temporary arrays.
3647 
3648 NOTE 2: this algorithm makes only one pass through the array and uses a
3649  running sum to speed up calculation of the averages. Additional measures
3650  are taken to ensure that running sum on a long sequence of zero
3651  elements will be correctly reset to zero even in the presence of
3652  round-off error.
3653 
3654 NOTE 3: this is the unsymmetric version of the algorithm, which does NOT
3655  average points after the current one. Only X[i], X[i-1], ... are
3656  used when calculating the new value of X[i]. Note also that
3657  this algorithm uses BOTH previous points and the current one, i.e.
3658  the new value of X[i] depends on BOTH previous points and X[i] itself.
3659 
3660  -- ALGLIB --
3661  Copyright 25.10.2011 by Bochkanov Sergey
3662 *************************************************************************/
3663 void filtersma(real_1d_array &x, const ae_int_t n, const ae_int_t k);
3664 void filtersma(real_1d_array &x, const ae_int_t k);
3665 
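A short sketch; the expected output follows directly from the definition above (each value is averaged with at most K-1=1 predecessor):

    #include <cstdio>
    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        real_1d_array x = "[5,5,5,0,0]";
        filtersma(x, 5, 2);            // SMA(2)
        // x is now [5, 5, 5, 2.5, 0]
        printf("%f\n", x[3]);          // prints 2.500000
        return 0;
    }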
3666 
3667 /*************************************************************************
3668 Filters: exponential moving averages.
3669 
3670 This filter replaces the array by the results of an EMA(alpha) filter.
3671 EMA(alpha) is defined as a filter which replaces X[] by S[]:
3672  S[0] = X[0]
3673  S[t] = alpha*X[t] + (1-alpha)*S[t-1]
3674 
3675 INPUT PARAMETERS:
3676  X - array[N], array to process. It can be larger than N;
3677  in this case only the first N points are processed.
3678  N - points count, N>=0
3679  alpha - 0<alpha<=1, smoothing parameter.
3680 
3681 OUTPUT PARAMETERS:
3682  X - array, whose first N elements were processed
3683  with EMA(alpha)
3684 
3685 NOTE 1: this function uses an efficient in-place algorithm which does not
3686  allocate temporary arrays.
3687 
3688 NOTE 2: this algorithm uses BOTH previous points and the current one, i.e.
3689  the new value of X[i] depends on BOTH the previous point and X[i] itself.
3690 
3691 NOTE 3: technical analysis users quite often work with the EMA coefficient
3692  expressed in DAYS instead of fractions. If you want to calculate
3693  EMA(N), where N is a number of days, you can use alpha=2/(N+1).
3694 
3695  -- ALGLIB --
3696  Copyright 25.10.2011 by Bochkanov Sergey
3697 *************************************************************************/
3698 void filterema(real_1d_array &x, const ae_int_t n, const double alpha);
3699 void filterema(real_1d_array &x, const double alpha);
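A short sketch using the two-argument overload and the days-to-alpha conversion from NOTE 3:

    #include <cstdio>
    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        real_1d_array x = "[10,11,12,13]";
        double alpha = 2.0/(9+1);      // "9-day" EMA: alpha = 2/(N+1) = 0.2
        filterema(x, alpha);           // S[0]=X[0], S[t]=alpha*X[t]+(1-alpha)*S[t-1]
        printf("%f\n", x[3]);          // last smoothed value
        return 0;
    }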
3700 
3701 
3702 /*************************************************************************
3703 Filters: linear regression moving averages.
3704 
3705 This filter replaces the array by the results of an LRMA(K) filter.
3706 
3707 LRMA(K) is defined as a filter which, for each data point, builds a linear
3708 regression model using K previous points (the point itself is included in
3709 these K points) and calculates the value of this linear model at the point
3710 in question.
3711 
3712 INPUT PARAMETERS:
3713  X - array[N], array to process. It can be larger than N;
3714  in this case only the first N points are processed.
3715  N - points count, N>=0
3716  K - K>=1 (K can be larger than N , such cases will be
3717  correctly handled). Window width. K=1 corresponds to
3718  identity transformation (nothing changes).
3719 
3720 OUTPUT PARAMETERS:
3721  X - array, whose first N elements were processed with LRMA(K)
3722 
3723 NOTE 1: this function uses an efficient in-place algorithm which does not
3724  allocate temporary arrays.
3725 
3726 NOTE 2: this algorithm makes only one pass through the array and uses a
3727  running sum to speed up calculation of the averages. Additional measures
3728  are taken to ensure that running sum on a long sequence of zero
3729  elements will be correctly reset to zero even in the presence of
3730  round-off error.
3731 
3732 NOTE 3: this is the unsymmetric version of the algorithm, which does NOT
3733  average points after the current one. Only X[i], X[i-1], ... are
3734  used when calculating the new value of X[i]. Note also that
3735  this algorithm uses BOTH previous points and the current one, i.e.
3736  the new value of X[i] depends on BOTH previous points and X[i] itself.
3737 
3738  -- ALGLIB --
3739  Copyright 25.10.2011 by Bochkanov Sergey
3740 *************************************************************************/
3741 void filterlrma(real_1d_array &x, const ae_int_t n, const ae_int_t k);
3742 void filterlrma(real_1d_array &x, const ae_int_t k);
3743 
3744 /*************************************************************************
3745 This subroutine trains logit model.
3746 
3747 INPUT PARAMETERS:
3748  XY - training set, array[0..NPoints-1,0..NVars]
3749  First NVars columns store values of independent
3750  variables, next column stores number of class (from 0
3751  to NClasses-1) which dataset element belongs to. Fractional
3752  values are rounded to nearest integer.
3753  NPoints - training set size, NPoints>=1
3754  NVars - number of independent variables, NVars>=1
3755  NClasses - number of classes, NClasses>=2
3756 
3757 OUTPUT PARAMETERS:
3758  Info - return code:
3759  * -2, if there is a point with class number
3760  outside of [0..NClasses-1].
3761  * -1, if incorrect parameters were passed
3762  (NPoints<NVars+2, NVars<1, NClasses<2).
3763  * 1, if task has been solved
3764  LM - model built
3765  Rep - training report
3766 
3767  -- ALGLIB --
3768  Copyright 10.09.2008 by Bochkanov Sergey
3769 *************************************************************************/
3770 void mnltrainh(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nclasses, ae_int_t &info, logitmodel &lm, mnlreport &rep);
3771 
3772 
3773 /*************************************************************************
3774 Processing
3775 
3776 INPUT PARAMETERS:
3777  LM - logit model, passed by non-constant reference
3778  (some fields of structure are used as temporaries
3779  when calculating model output).
3780  X - input vector, array[0..NVars-1].
3781  Y - (possibly) preallocated buffer; if the size of Y is less than
3782  NClasses, it will be reallocated. If it is large enough, it
3783  is NOT reallocated, which saves some time on reallocation.
3784 
3785 OUTPUT PARAMETERS:
3786  Y - result, array[0..NClasses-1]
3787  Vector of posterior probabilities for classification task.
3788 
3789  -- ALGLIB --
3790  Copyright 10.09.2008 by Bochkanov Sergey
3791 *************************************************************************/
3792 void mnlprocess(const logitmodel &lm, const real_1d_array &x, real_1d_array &y);
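A minimal train-and-classify sketch combining MNLTrainH() (declared above) and MNLProcess(); the tiny dataset is illustrative and satisfies NPoints>=NVars+2:

    #include <cstdio>
    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        // One variable + class number (0 or 1) in the last column.
        real_2d_array xy = "[[0.0,0],[0.2,0],[0.8,1],[1.0,1]]";
        ae_int_t info;
        logitmodel lm;
        mnlreport rep;
        mnltrainh(xy, 4, 1, 2, info, lm, rep);
        real_1d_array x = "[0.5]", y;
        mnlprocess(lm, x, y);          // y[0], y[1]: posterior probabilities
        printf("%f %f\n", y[0], y[1]);
        return 0;
    }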
3793 
3794 
3795 /*************************************************************************
3796 'interactive' variant of MNLProcess for languages like Python which
3797 support constructs like "Y = MNLProcess(LM,X)" and interactive mode of the
3798 interpreter
3799 
3800 This function allocates a new array on each call, so it is significantly
3801 slower than its 'non-interactive' counterpart, but it is more convenient
3802 when you call it from the command line.
3803 
3804  -- ALGLIB --
3805  Copyright 10.09.2008 by Bochkanov Sergey
3806 *************************************************************************/
3807 void mnlprocessi(const logitmodel &lm, const real_1d_array &x, real_1d_array &y);
3808 
3809 
3810 /*************************************************************************
3811 Unpacks coefficients of the logit model. The logit model has the form:
3812 
3813  P(class=i) = S(i) / (S(0) + S(1) + ... +S(M-1))
3814  S(i) = Exp(A[i,0]*X[0] + ... + A[i,N-1]*X[N-1] + A[i,N]), when i<M-1
3815  S(M-1) = 1
3816 
3817 INPUT PARAMETERS:
3818  LM - logit model in ALGLIB format
3819 
3820 OUTPUT PARAMETERS:
3821  V - coefficients, array[0..NClasses-2,0..NVars]
3822  NVars - number of independent variables
3823  NClasses - number of classes
3824 
3825  -- ALGLIB --
3826  Copyright 10.09.2008 by Bochkanov Sergey
3827 *************************************************************************/
3828 void mnlunpack(const logitmodel &lm, real_2d_array &a, ae_int_t &nvars, ae_int_t &nclasses);
3829 
3830 
3831 /*************************************************************************
3832 "Packs" coefficients and creates logit model in ALGLIB format (MNLUnpack
3833 reversed).
3834 
3835 INPUT PARAMETERS:
3836  A - model (see MNLUnpack)
3837  NVars - number of independent variables
3838  NClasses - number of classes
3839 
3840 OUTPUT PARAMETERS:
3841  LM - logit model.
3842 
3843  -- ALGLIB --
3844  Copyright 10.09.2008 by Bochkanov Sergey
3845 *************************************************************************/
3846 void mnlpack(const real_2d_array &a, const ae_int_t nvars, const ae_int_t nclasses, logitmodel &lm);
3847 
3848 
3849 /*************************************************************************
3850 Average cross-entropy (in bits per element) on the test set
3851 
3852 INPUT PARAMETERS:
3853  LM - logit model
3854  XY - test set
3855  NPoints - test set size
3856 
3857 RESULT:
3858  CrossEntropy/(NPoints*ln(2)).
3859 
3860  -- ALGLIB --
3861  Copyright 10.09.2008 by Bochkanov Sergey
3862 *************************************************************************/
3863 double mnlavgce(const logitmodel &lm, const real_2d_array &xy, const ae_int_t npoints);
3864 
3865 
3866 /*************************************************************************
3867 Relative classification error on the test set
3868 
3869 INPUT PARAMETERS:
3870  LM - logit model
3871  XY - test set
3872  NPoints - test set size
3873 
3874 RESULT:
3875  percent of incorrectly classified cases.
3876 
3877  -- ALGLIB --
3878  Copyright 10.09.2008 by Bochkanov Sergey
3879 *************************************************************************/
3880 double mnlrelclserror(const logitmodel &lm, const real_2d_array &xy, const ae_int_t npoints);
3881 
3882 
3883 /*************************************************************************
3884 RMS error on the test set
3885 
3886 INPUT PARAMETERS:
3887  LM - logit model
3888  XY - test set
3889  NPoints - test set size
3890 
3891 RESULT:
3892  root mean square error (error when estimating posterior probabilities).
3893 
3894  -- ALGLIB --
3895  Copyright 30.08.2008 by Bochkanov Sergey
3896 *************************************************************************/
3897 double mnlrmserror(const logitmodel &lm, const real_2d_array &xy, const ae_int_t npoints);
3898 
3899 
3900 /*************************************************************************
3901 Average error on the test set
3902 
3903 INPUT PARAMETERS:
3904  LM - logit model
3905  XY - test set
3906  NPoints - test set size
3907 
3908 RESULT:
3909  average error (error when estimating posterior probabilities).
3910 
3911  -- ALGLIB --
3912  Copyright 30.08.2008 by Bochkanov Sergey
3913 *************************************************************************/
3914 double mnlavgerror(const logitmodel &lm, const real_2d_array &xy, const ae_int_t npoints);
3915 
3916 
3917 /*************************************************************************
3918 Average relative error on the test set
3919 
3920 INPUT PARAMETERS:
3921  LM - logit model
3922  XY - test set
3923  NPoints - test set size
3924 
3925 RESULT:
3926  average relative error (error when estimating posterior probabilities).
3927 
3928  -- ALGLIB --
3929  Copyright 30.08.2008 by Bochkanov Sergey
3930 *************************************************************************/
3931 double mnlavgrelerror(const logitmodel &lm, const real_2d_array &xy, const ae_int_t ssize);
3932 
3933 
3934 /*************************************************************************
3935 Classification error on test set = MNLRelClsError*NPoints
3936 
3937  -- ALGLIB --
3938  Copyright 10.09.2008 by Bochkanov Sergey
3939 *************************************************************************/
3940 ae_int_t mnlclserror(const logitmodel &lm, const real_2d_array &xy, const ae_int_t npoints);
3941 
3942 /*************************************************************************
3943 DESCRIPTION:
3944 
3945 This function creates MCPD (Markov Chains for Population Data) solver.
3946 
3947 This solver can be used to find transition matrix P for N-dimensional
3948 prediction problem where transition from X[i] to X[i+1] is modelled as
3949  X[i+1] = P*X[i]
3950 where X[i] and X[i+1] are N-dimensional population vectors (components of
3951 each X are non-negative), and P is an N*N transition matrix (elements of P
3952 are non-negative, each column sums to 1.0).
3953 
3954 Such models arise when:
3955 * there is some population of individuals
3956 * individuals can have different states
3957 * individuals can transit from one state to another
3958 * population size is constant, i.e. there are no new individuals and no one
3959  leaves the population
3960 * you want to model transitions of individuals from one state into another
3961 
3962 USAGE:
3963 
3964 Here we give a very brief outline of MCPD. We strongly recommend that you
3965 read the examples in the ALGLIB Reference Manual and the ALGLIB User Guide
3966 on data analysis, which is available at http://www.alglib.net/dataanalysis/
3967 
3968 1. User initializes algorithm state with MCPDCreate() call
3969 
3970 2. User adds one or more tracks - sequences of states which describe
3971  evolution of a system being modelled from different starting conditions
3972 
3973 3. User may add optional boundary, equality and/or linear constraints on
3974  the coefficients of P by calling one of the following functions:
3975  * MCPDSetEC() to set equality constraints
3976  * MCPDSetBC() to set bound constraints
3977  * MCPDSetLC() to set linear constraints
3978 
3979 4. Optionally, user may set custom weights for prediction errors (by
3980  default, the algorithm assigns non-equal, automatically chosen weights
3981  for errors in the prediction of different components of X). It can be
3982  done with a call to the MCPDSetPredictionWeights() function.
3983 
3984 5. User calls MCPDSolve() function, which takes the algorithm state and
3985  solves the problem (no user-supplied callback is needed).
3986 
3987 6. User calls MCPDResults() to get the solution; see the sketch below.
3988 
3989 INPUT PARAMETERS:
3990  N - problem dimension, N>=1
3991 
3992 OUTPUT PARAMETERS:
3993  State - structure stores algorithm state
3994 
3995  -- ALGLIB --
3996  Copyright 23.05.2010 by Bochkanov Sergey
3997 *************************************************************************/
3998 void mcpdcreate(const ae_int_t n, mcpdstate &s);
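A minimal end-to-end sketch of the workflow above. The functions used are declared later in this header; mcpdresults() and the mcpdreport structure are assumed from the standard ALGLIB C++ API:

    #include <cstdio>
    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        mcpdstate s;
        mcpdcreate(2, s);                       // 2-state system
        // One track of proportional data: each row sums to 1.0.
        real_2d_array track = "[[0.9,0.1],[0.8,0.2],[0.7,0.3]]";
        mcpdaddtrack(s, track);
        mcpdsolve(s);
        real_2d_array p;
        mcpdreport rep;
        mcpdresults(s, p, rep);                 // p = 2x2 transition matrix
        printf("P[0][0]=%f\n", p[0][0]);
        return 0;
    }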
3999 
4000 
4001 /*************************************************************************
4002 DESCRIPTION:
4003 
4004 This function is a specialized version of the MCPDCreate() function; we
4005 recommend that you read the comments for that function for general
4006 information about the MCPD solver.
4007 
4008 This function creates MCPD (Markov Chains for Population Data) solver
4009 for "Entry-state" model, i.e. model where transition from X[i] to X[i+1]
4010 is modelled as
4011  X[i+1] = P*X[i]
4012 where
4013  X[i] and X[i+1] are N-dimensional state vectors
4014  P is an N*N transition matrix
4015 and one selected component of X[] is called "entry" state and is treated
4016 in a special way:
4017  system state always transits from the "entry" state to some other state
4018  system state cannot transit from any state into the "entry" state
4019 Such conditions basically mean that the row of P which corresponds to the
4020 "entry" state is zero.
4021 
4022 Such models arise when:
4023 * there is some population of individuals
4024 * individuals can have different states
4025 * individuals can transit from one state to another
4026 * population size is NOT constant - at every moment of time there is some
4027  (unpredictable) number of "new" individuals, which can transit into one
4028  of the states at the next turn, but still no one leaves the population
4029 * you want to model transitions of individuals from one state into another
4030 * but you do NOT want to predict the number of "new" individuals because it
4031  does not depend on individuals already present (hence the system cannot
4032  transit INTO the entry state - it can only transit FROM it).
4033 
4034 This model is discussed in more detail in the ALGLIB User Guide (see
4035 http://www.alglib.net/dataanalysis/ for more information).
4036 
4037 INPUT PARAMETERS:
4038  N - problem dimension, N>=2
4039  EntryState- index of entry state, in 0..N-1
4040 
4041 OUTPUT PARAMETERS:
4042  State - structure stores algorithm state
4043 
4044  -- ALGLIB --
4045  Copyright 23.05.2010 by Bochkanov Sergey
4046 *************************************************************************/
4047 void mcpdcreateentry(const ae_int_t n, const ae_int_t entrystate, mcpdstate &s);
4048 
4049 
4050 /*************************************************************************
4051 DESCRIPTION:
4052 
4053 This function is a specialized version of the MCPDCreate() function; we
4054 recommend that you read the comments for that function for general
4055 information about the MCPD solver.
4056 
4057 This function creates MCPD (Markov Chains for Population Data) solver
4058 for "Exit-state" model, i.e. model where transition from X[i] to X[i+1]
4059 is modelled as
4060  X[i+1] = P*X[i]
4061 where
4062  X[i] and X[i+1] are N-dimensional state vectors
4063  P is an N*N transition matrix
4064 and one selected component of X[] is called "exit" state and is treated
4065 in a special way:
4066  system state can transit from any state into the "exit" state
4067  system state cannot transit from the "exit" state into any other state
4068  transition operator discards the "exit" state (makes it zero at each turn)
4069 Such conditions basically mean that the column of P which corresponds to
4070 the "exit" state is zero. Multiplication by such a P may decrease the sum
4071 of vector components.
4072 
4073 Such models arise when:
4074 * there is some population of individuals
4075 * individuals can have different states
4076 * individuals can transit from one state to another
4077 * population size is NOT constant - individuals can move into the "exit"
4078  state and leave the population at the next turn, but there are no new individuals
4079 * the number of individuals which leave the population can be predicted
4080 * you want to model transitions of individuals from one state into another
4081  (including transitions into the "exit" state)
4082 
4083 This model is discussed in more detail in the ALGLIB User Guide (see
4084 http://www.alglib.net/dataanalysis/ for more information).
4085 
4086 INPUT PARAMETERS:
4087  N - problem dimension, N>=2
4088  ExitState- index of exit state, in 0..N-1
4089 
4090 OUTPUT PARAMETERS:
4091  State - structure stores algorithm state
4092 
4093  -- ALGLIB --
4094  Copyright 23.05.2010 by Bochkanov Sergey
4095 *************************************************************************/
4096 void mcpdcreateexit(const ae_int_t n, const ae_int_t exitstate, mcpdstate &s);
4097 
4098 
4099 /*************************************************************************
4100 DESCRIPTION:
4101 
4102 This function is a specialized version of the MCPDCreate() function; we
4103 recommend that you read the comments for that function for general
4104 information about the MCPD solver.
4105 
4106 This function creates MCPD (Markov Chains for Population Data) solver
4107 for "Entry-Exit-states" model, i.e. model where transition from X[i] to
4108 X[i+1] is modelled as
4109  X[i+1] = P*X[i]
4110 where
4111  X[i] and X[i+1] are N-dimensional state vectors
4112  P is an N*N transition matrix
4113 one selected component of X[] is called "entry" state and is treated in a
4114 special way:
4115  system state always transits from the "entry" state to some other state
4116  system state cannot transit from any state into the "entry" state
4117 and another component of X[] is called the "exit" state and is treated in
4118 a special way too:
4119  system state can transit from any state into the "exit" state
4120  system state cannot transit from the "exit" state into any other state
4121  transition operator discards the "exit" state (makes it zero at each turn)
4122 Such conditions basically mean that:
4123  the row of P which corresponds to the "entry" state is zero
4124  the column of P which corresponds to the "exit" state is zero
4125 Multiplication by such a P may decrease the sum of vector components.
4126 
4127 Such models arise when:
4128 * there is some population of individuals
4129 * individuals can have different states
4130 * individuals can transit from one state to another
4131 * population size is NOT constant
4132 * at every moment of time there is some (unpredictable) number of "new"
4133  individuals, which can transit into one of the states at the next turn
4134 * some individuals can move (predictably) into the "exit" state and leave
4135  the population at the next turn
4136 * you want to model transitions of individuals from one state into another,
4137  including transitions from the "entry" state and into the "exit" state.
4138 * but you do NOT want to predict the number of "new" individuals because it
4139  does not depend on individuals already present (hence the system cannot
4140  transit INTO the entry state - it can only transit FROM it).
4141 
4142 This model is discussed in more detail in the ALGLIB User Guide (see
4143 http://www.alglib.net/dataanalysis/ for more information).
4144 
4145 INPUT PARAMETERS:
4146  N - problem dimension, N>=2
4147  EntryState- index of entry state, in 0..N-1
4148  ExitState- index of exit state, in 0..N-1
4149 
4150 OUTPUT PARAMETERS:
4151  State - structure stores algorithm state
4152 
4153  -- ALGLIB --
4154  Copyright 23.05.2010 by Bochkanov Sergey
4155 *************************************************************************/
4156 void mcpdcreateentryexit(const ae_int_t n, const ae_int_t entrystate, const ae_int_t exitstate, mcpdstate &s);
4157 
4158 
4159 /*************************************************************************
4160 This function is used to add a track - sequence of system states at the
4161 different moments of its evolution.
4162 
4163 You may add one or several tracks to the MCPD solver. In case you have
4164 several tracks, they won't overwrite each other. For example, if you pass
4165 two tracks, A1-A2-A3 (system at t=A+1, t=A+2 and t=A+3) and B1-B2-B3, then
4166 the solver will try to model transitions from t=A+1 to t=A+2, t=A+2 to
4167 t=A+3, t=B+1 to t=B+2, t=B+2 to t=B+3. But it WON'T mix these two tracks -
4168 i.e. it won't try to model the transition from t=A+3 to t=B+1.
4169 
4170 INPUT PARAMETERS:
4171  S - solver
4172  XY - track, array[K,N]:
4173  * I-th row is a state at t=I
4174  * elements of XY must be non-negative (exception will be
4175  thrown on negative elements)
4176  K - number of points in a track
4177  * if given, only leading K rows of XY are used
4178  * if not given, automatically determined from size of XY
4179 
4180 NOTES:
4181 
4182 1. Track may contain either proportional or population data:
4183  * with proportional data all rows of XY must sum to 1.0, i.e. we have
4184  proportions instead of absolute population values
4185  * with population data rows of XY contain population counts and generally
4186  do not sum to 1.0 (although they still must be non-negative)
4187 
4188  -- ALGLIB --
4189  Copyright 23.05.2010 by Bochkanov Sergey
4190 *************************************************************************/
4191 void mcpdaddtrack(const mcpdstate &s, const real_2d_array &xy, const ae_int_t k);
4192 void mcpdaddtrack(const mcpdstate &s, const real_2d_array &xy);
4193 
4194 
4195 /*************************************************************************
4196 This function is used to add equality constraints on the elements of the
4197 transition matrix P.
4198 
4199 MCPD solver has four types of constraints which can be placed on P:
4200 * user-specified equality constraints (optional)
4201 * user-specified bound constraints (optional)
4202 * user-specified general linear constraints (optional)
4203 * basic constraints (always present):
4204  * non-negativity: P[i,j]>=0
4205  * consistency: every column of P sums to 1.0
4206 
4207 Final constraints which are passed to the underlying optimizer are
4208 calculated as the intersection of all present constraints. For example,
4209 you may specify a boundary constraint on P[0,0] and an equality one:
4210  0.1<=P[0,0]<=0.9
4211  P[0,0]=0.5
4212 Such a combination of constraints will be silently reduced to its
4213 intersection, which is P[0,0]=0.5.
4214 
4215 This function can be used to place equality constraints on an arbitrary
4216 subset of elements of P. The set of constraints is specified by EC, which
4217 may contain either NAN's or finite numbers from [0,1]. NAN denotes absence
4218 of a constraint, while a finite number denotes an equality constraint on
4219 the corresponding element of P.
4220 
4221 You can also use the MCPDAddEC() function, which allows you to ADD an
4222 equality constraint for one element of P without changing the constraints
4223 for other elements.
4224 
4225 These functions (MCPDSetEC and MCPDAddEC) interact as follows:
4226 * there is internal matrix of equality constraints which is stored in the
4227  MCPD solver
4228 * MCPDSetEC() replaces this matrix by another one (SET)
4229 * MCPDAddEC() modifies one element of this matrix and leaves other ones
4230  unchanged (ADD)
4231 * thus MCPDAddEC() call preserves all modifications done by previous
4232  calls, while MCPDSetEC() completely discards all changes done to the
4233  equality constraints.
4234 
4235 INPUT PARAMETERS:
4236  S - solver
4237  EC - equality constraints, array[N,N]. Elements of EC can be
4238  either NAN's or finite numbers from [0,1]. NAN denotes
4239  absence of constraints, while finite value denotes
4240  equality constraint on the corresponding element of P.
4241 
4242 NOTES:
4243 
4244 1. infinite values of EC will lead to an exception being thrown. Values
4245 less than 0.0 or greater than 1.0 will lead to an error code being
4246 returned after the call to MCPDSolve().
4247 
4248  -- ALGLIB --
4249  Copyright 23.05.2010 by Bochkanov Sergey
4250 *************************************************************************/
4251 void mcpdsetec(const mcpdstate &s, const real_2d_array &ec);
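A short sketch; alglib::fp_nan (from ap.h) is assumed as the "no constraint" marker described above:

    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        mcpdstate s;
        mcpdcreate(2, s);
        real_2d_array ec;
        ec.setlength(2, 2);
        for(int i=0; i<2; i++)
            for(int j=0; j<2; j++)
                ec[i][j] = fp_nan;   // NAN = element unconstrained
        ec[0][1] = 0.25;             // pin P[0,1] to 0.25
        mcpdsetec(s, ec);
        return 0;
    }

The same single-element effect could be obtained with MCPDAddEC(s,0,1,0.25), described below, without touching the other elements.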
4252 
4253 
4254 /*************************************************************************
4255 This function is used to add equality constraints on the elements of the
4256 transition matrix P.
4257 
4258 MCPD solver has four types of constraints which can be placed on P:
4259 * user-specified equality constraints (optional)
4260 * user-specified bound constraints (optional)
4261 * user-specified general linear constraints (optional)
4262 * basic constraints (always present):
4263  * non-negativity: P[i,j]>=0
4264  * consistency: every column of P sums to 1.0
4265 
4266 Final constraints which are passed to the underlying optimizer are
4267 calculated as the intersection of all present constraints. For example,
4268 you may specify a boundary constraint on P[0,0] and an equality one:
4269  0.1<=P[0,0]<=0.9
4270  P[0,0]=0.5
4271 Such a combination of constraints will be silently reduced to its
4272 intersection, which is P[0,0]=0.5.
4273 
4274 This function can be used to ADD an equality constraint for one element of
4275 P without changing the constraints for other elements.
4276 
4277 You can also use the MCPDSetEC() function, which allows you to specify an
4278 arbitrary set of equality constraints in one call.
4279 
4280 These functions (MCPDSetEC and MCPDAddEC) interact as follows:
4281 * there is internal matrix of equality constraints which is stored in the
4282  MCPD solver
4283 * MCPDSetEC() replaces this matrix by another one (SET)
4284 * MCPDAddEC() modifies one element of this matrix and leaves other ones
4285  unchanged (ADD)
4286 * thus MCPDAddEC() call preserves all modifications done by previous
4287  calls, while MCPDSetEC() completely discards all changes done to the
4288  equality constraints.
4289 
4290 INPUT PARAMETERS:
4291  S - solver
4292  I - row index of element being constrained
4293  J - column index of element being constrained
4294  C - value (constraint for P[I,J]). Can be either NAN (no
4295  constraint) or finite value from [0,1].
4296 
4297 NOTES:
4298 
4299 1. infinite values of C will lead to an exception being thrown. Values
4300 less than 0.0 or greater than 1.0 will lead to an error code being
4301 returned after the call to MCPDSolve().
4302 
4303  -- ALGLIB --
4304  Copyright 23.05.2010 by Bochkanov Sergey
4305 *************************************************************************/
4306 void mcpdaddec(const mcpdstate &s, const ae_int_t i, const ae_int_t j, const double c);
4307 
4308 
4309 /*************************************************************************
4310 This function is used to add bound constraints on the elements of the
4311 transition matrix P.
4312 
4313 MCPD solver has four types of constraints which can be placed on P:
4314 * user-specified equality constraints (optional)
4315 * user-specified bound constraints (optional)
4316 * user-specified general linear constraints (optional)
4317 * basic constraints (always present):
4318  * non-negativity: P[i,j]>=0
4319  * consistency: every column of P sums to 1.0
4320 
4321 Final constraints which are passed to the underlying optimizer are
4322 calculated as the intersection of all present constraints. For example,
4323 you may specify a boundary constraint on P[0,0] and an equality one:
4324  0.1<=P[0,0]<=0.9
4325  P[0,0]=0.5
4326 Such a combination of constraints will be silently reduced to its
4327 intersection, which is P[0,0]=0.5.
4328 
4329 This function can be used to place bound constraints on an arbitrary
4330 subset of elements of P. The set of constraints is specified by the
4331 BndL/BndU matrices, which may contain an arbitrary combination of finite
4332 numbers or infinities (like -INF<x<=0.5 or 0.1<=x<+INF).
4333 
4334 You can also use the MCPDAddBC() function, which allows you to ADD a bound
4335 constraint for one element of P without changing the constraints for other elements.
4336 
4337 These functions (MCPDSetBC and MCPDAddBC) interact as follows:
4338 * there is internal matrix of bound constraints which is stored in the
4339  MCPD solver
4340 * MCPDSetBC() replaces this matrix by another one (SET)
4341 * MCPDAddBC() modifies one element of this matrix and leaves other ones
4342  unchanged (ADD)
4343 * thus MCPDAddBC() call preserves all modifications done by previous
4344  calls, while MCPDSetBC() completely discards all changes done to the
4345  bound constraints.
4346 
4347 INPUT PARAMETERS:
4348  S - solver
4349  BndL - lower bounds constraints, array[N,N]. Elements of BndL can
4350  be finite numbers or -INF.
4351  BndU - upper bounds constraints, array[N,N]. Elements of BndU can
4352  be finite numbers or +INF.
4353 
4354  -- ALGLIB --
4355  Copyright 23.05.2010 by Bochkanov Sergey
4356 *************************************************************************/
4357 void mcpdsetbc(const mcpdstate &s, const real_2d_array &bndl, const real_2d_array &bndu);
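A short sketch bounding all four elements of a 2x2 P (fp_neginf/fp_posinf from ap.h could be used instead of finite values to leave an element unbounded):

    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        mcpdstate s;
        mcpdcreate(2, s);
        real_2d_array bndl = "[[0.0,0.0],[0.0,0.0]]";
        real_2d_array bndu = "[[1.0,0.5],[1.0,1.0]]"; // cap P[0,1] at 0.5
        mcpdsetbc(s, bndl, bndu);
        return 0;
    }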
4358 
4359 
4360 /*************************************************************************
4361 This function is used to add bound constraints on the elements of the
4362 transition matrix P.
4363 
4364 MCPD solver has four types of constraints which can be placed on P:
4365 * user-specified equality constraints (optional)
4366 * user-specified bound constraints (optional)
4367 * user-specified general linear constraints (optional)
4368 * basic constraints (always present):
4369  * non-negativity: P[i,j]>=0
4370  * consistency: every column of P sums to 1.0
4371 
4372 Final constraints which are passed to the underlying optimizer are
4373 calculated as the intersection of all present constraints. For example,
4374 you may specify a boundary constraint on P[0,0] and an equality one:
4375  0.1<=P[0,0]<=0.9
4376  P[0,0]=0.5
4377 Such a combination of constraints will be silently reduced to its
4378 intersection, which is P[0,0]=0.5.
4379 
4380 This function can be used to ADD a bound constraint for one element of P
4381 without changing the constraints for other elements.
4382 
4383 You can also use the MCPDSetBC() function, which allows you to place bound
4384 constraints on an arbitrary subset of elements of P. The set of constraints
4385 is specified by the BndL/BndU matrices, which may contain an arbitrary
4386 combination of finite numbers or infinities (like -INF<x<=0.5 or 0.1<=x<+INF).
4387 
4388 These functions (MCPDSetBC and MCPDAddBC) interact as follows:
4389 * there is internal matrix of bound constraints which is stored in the
4390  MCPD solver
4391 * MCPDSetBC() replaces this matrix by another one (SET)
4392 * MCPDAddBC() modifies one element of this matrix and leaves other ones
4393  unchanged (ADD)
4394 * thus MCPDAddBC() call preserves all modifications done by previous
4395  calls, while MCPDSetBC() completely discards all changes done to the
4396  bound constraints.
4397 
4398 INPUT PARAMETERS:
4399  S - solver
4400  I - row index of element being constrained
4401  J - column index of element being constrained
4402  BndL - lower bound
4403  BndU - upper bound
4404 
4405  -- ALGLIB --
4406  Copyright 23.05.2010 by Bochkanov Sergey
4407 *************************************************************************/
4408 void mcpdaddbc(const mcpdstate &s, const ae_int_t i, const ae_int_t j, const double bndl, const double bndu);
4409 
4410 
4411 /*************************************************************************
4412 This function is used to set linear equality/inequality constraints on the
4413 elements of the transition matrix P.
4414 
4415 This function can be used to set one or several general linear constraints
4416 on the elements of P. Two types of constraints are supported:
4417 * equality constraints
4418 * inequality constraints (both less-or-equal and greater-or-equal)
4419 
4420 Coefficients of constraints are specified by matrix C (one of the
4421 parameters). One row of C corresponds to one constraint. Because
4422 transition matrix P has N*N elements, we need N*N columns to store all
4423 coefficients (they are stored row by row), and one more column to store
4424 right part - hence C has N*N+1 columns. Constraint kind is stored in the
4425 CT array.
4426 
4427 Thus, I-th linear constraint is
4428  P[0,0]*C[I,0] + P[0,1]*C[I,1] + .. + P[0,N-1]*C[I,N-1] +
4429  + P[1,0]*C[I,N] + P[1,1]*C[I,N+1] + ... +
4430  + P[N-1,N-1]*C[I,N*N-1] ?=? C[I,N*N]
4431 where ?=? can be either "=" (CT[i]=0), "<=" (CT[i]<0) or ">=" (CT[i]>0).
4432 
4433 Your constraint may involve only some subset of P (less than N*N elements).
4434 For example it can be something like
4435  P[0,0] + P[0,1] = 0.5
4436 In this case you should still pass a matrix with N*N+1 columns, but all
4437 its elements (except for C[0,0], C[0,1] and the right part C[0,N*N]) will be zero.
4438 
4439 INPUT PARAMETERS:
4440  S - solver
4441  C - array[K,N*N+1] - coefficients of constraints
4442  (see above for complete description)
4443  CT - array[K] - constraint types
4444  (see above for complete description)
4445  K - number of equality/inequality constraints, K>=0:
4446  * if given, only leading K elements of C/CT are used
4447  * if not given, automatically determined from sizes of C/CT
4448 
4449  -- ALGLIB --
4450  Copyright 23.05.2010 by Bochkanov Sergey
4451 *************************************************************************/
4452 void mcpdsetlc(const mcpdstate &s, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k);
4453 void mcpdsetlc(const mcpdstate &s, const real_2d_array &c, const integer_1d_array &ct);
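A sketch of the row layout for N=2 (coefficients of P[0,0], P[0,1], P[1,0], P[1,1] followed by the right part), encoding the example constraint above:

    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        mcpdstate s;
        mcpdcreate(2, s);                    // N=2, so C has N*N+1 = 5 columns
        real_2d_array c = "[[1,1,0,0,0.5]]"; // P[0,0]+P[0,1] ?=? 0.5
        integer_1d_array ct = "[0]";         // CT[0]=0 selects "=" (equality)
        mcpdsetlc(s, c, ct);                 // K inferred from the sizes of C/CT
        return 0;
    }

A negative or positive CT entry would select "<=" or ">=" instead, per the ?=? convention described above.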
4454 
4455 
4456 /*************************************************************************
4457 This function allows you to tune the amount of Tikhonov regularization
4458 being applied to your problem.
4459 
4460 By default, regularizing term is equal to r*||P-prior_P||^2, where r is a
4461 small non-zero value, P is transition matrix, prior_P is identity matrix,
4462 ||X||^2 is a sum of squared elements of X.
4463 
4464 This function allows you to change coefficient r. You can also change
4465 prior values with MCPDSetPrior() function.
4466 
4467 INPUT PARAMETERS:
4468  S - solver
4469  V - regularization coefficient, finite non-negative value. It
4470  is not recommended to specify zero value unless you are
4471  pretty sure that you want it.
4472 
4473  -- ALGLIB --
4474  Copyright 23.05.2010 by Bochkanov Sergey
4475 *************************************************************************/
4476 void mcpdsettikhonovregularizer(const mcpdstate &s, const double v);
4477 
4478 
4479 /*************************************************************************
4480 This function allows you to set prior values used for regularization of your
4481 problem.
4482 
4483 By default, regularizing term is equal to r*||P-prior_P||^2, where r is a
4484 small non-zero value, P is transition matrix, prior_P is identity matrix,
4485 ||X||^2 is a sum of squared elements of X.
4486 
4487 This function allows you to change prior values prior_P. You can also
4488 change r with MCPDSetTikhonovRegularizer() function.
4489 
4490 INPUT PARAMETERS:
4491  S - solver
4492  PP - array[N,N], matrix of prior values:
4493  1. elements must be real numbers from [0,1]
4494  2. columns must sum to 1.0.
4495  First property is checked (exception is thrown otherwise),
4496  while second one is not checked/enforced.
4497 
4498  -- ALGLIB --
4499  Copyright 23.05.2010 by Bochkanov Sergey
4500 *************************************************************************/
4501 void mcpdsetprior(const mcpdstate &s, const real_2d_array &pp);
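
/*************************************************************************
Usage sketch for the regularization pair above: a hypothetical 2-state
example which sets the coefficient r and a column-stochastic prior_P.

    alglib::mcpdsettikhonovregularizer(s, 1.0E-4);       // custom r
    alglib::real_2d_array pp("[[0.9,0.2],[0.1,0.8]]");   // columns sum to 1
    alglib::mcpdsetprior(s, pp);
*************************************************************************/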
4502 
4503 
4504 /*************************************************************************
4505 This function is used to change prediction weights
4506 
4507 MCPD solver scales prediction errors as follows
4508  Error(P) = ||W*(y-P*x)||^2
4509 where
4510  x is a system state at time t
4511  y is a system state at time t+1
4512  P is a transition matrix
4513  W is a diagonal scaling matrix
4514 
4515 By default, weights are chosen in order to minimize relative prediction
4516 error instead of absolute one. For example, if one component of state is
4517 about 0.5 in magnitude and another one is about 0.05, then the algorithm
4518 will make the corresponding weights equal to 2.0 and 20.0.
4519 
4520 INPUT PARAMETERS:
4521  S - solver
4522  PW - array[N], weights:
4523  * must be non-negative values (exception will be thrown otherwise)
4524  * zero values will be replaced by automatically chosen values
4525 
4526  -- ALGLIB --
4527  Copyright 23.05.2010 by Bochkanov Sergey
4528 *************************************************************************/
4529 void mcpdsetpredictionweights(const mcpdstate &s, const real_1d_array &pw);
4530 
4531 
4532 /*************************************************************************
4533 This function is used to start solution of the MCPD problem.
4534 
4535 After return from this function, you can use MCPDResults() to get solution
4536 and completion code.
4537 
4538  -- ALGLIB --
4539  Copyright 23.05.2010 by Bochkanov Sergey
4540 *************************************************************************/
4541 void mcpdsolve(const mcpdstate &s);
4542 
4543 
4544 /*************************************************************************
4545 MCPD results
4546 
4547 INPUT PARAMETERS:
4548  State - algorithm state
4549 
4550 OUTPUT PARAMETERS:
4551  P - array[N,N], transition matrix
4552  Rep - optimization report. You should check Rep.TerminationType
4553  in order to distinguish successful termination from
4554  unsuccessful one. Speaking short, positive values denote
4555  success, negative ones are failures.
4556  More information about fields of this structure can be
4557  found in the comments on MCPDReport datatype.
4558 
4559 
4560  -- ALGLIB --
4561  Copyright 23.05.2010 by Bochkanov Sergey
4562 *************************************************************************/
4563 void mcpdresults(const mcpdstate &s, real_2d_array &p, mcpdreport &rep);
4564 
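/*************************************************************************
End-to-end MCPD sketch: a hypothetical workflow tying the calls above
together. It assumes mcpdcreate() and mcpdaddtrack() (declared elsewhere
in this header) and a short track of observed 2-component states; all
values are illustrative.

    alglib::mcpdstate s;
    alglib::mcpdcreate(2, s);
    // each row is one observed state; consecutive rows form (x,y) pairs
    alglib::real_2d_array track("[[1.0,0.0],[0.9,0.1],[0.82,0.18]]");
    alglib::mcpdaddtrack(s, track);
    alglib::mcpdsolve(s);
    alglib::real_2d_array p;
    alglib::mcpdreport rep;
    alglib::mcpdresults(s, p, rep);
    // rep.terminationtype>0 indicates success
*************************************************************************/
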
4565 /*************************************************************************
4566 This function serializes data structure to string.
4567 
4568 Important properties of s_out:
4569 * it contains alphanumeric characters, dots, underscores, minus signs
4570 * these symbols are grouped into words, which are separated by spaces
4571  and Windows-style (CR+LF) newlines
4572 * although serializer uses spaces and CR+LF as separators, you can
4573  replace any separator character by arbitrary combination of spaces,
4574  tabs, Windows or Unix newlines. It allows flexible reformatting of
4575  the string in case you want to include it into text or XML file.
4576  But you should not insert separators into the middle of the "words",
4577  nor should you change the case of letters.
4578 * s_out can be freely moved between 32-bit and 64-bit systems, little
4579  and big endian machines, and so on. You can serialize structure on
4580  32-bit machine and unserialize it on 64-bit one (or vice versa), or
4581  serialize it on SPARC and unserialize on x86. You can also
4582  serialize it in C++ version of ALGLIB and unserialize in C# one,
4583  and vice versa.
4584 *************************************************************************/
4585 void mlpeserialize(mlpensemble &obj, std::string &s_out);
4586 
4587 
4588 /*************************************************************************
4589 This function unserializes data structure from string.
4590 *************************************************************************/
4591 void mlpeunserialize(const std::string &s_in, mlpensemble &obj);
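
/*************************************************************************
Round-trip sketch: a minimal example, assuming an ensemble built with
MLPECreate0() (declared below). The restored copy is functionally
identical to the original.

    alglib::mlpensemble e1, e2;
    alglib::mlpecreate0(2, 1, 5, e1);   // 2 inputs, 1 output, 5 members
    std::string s;
    alglib::mlpeserialize(e1, s);
    alglib::mlpeunserialize(s, e2);     // e2 is now a copy of e1
*************************************************************************/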
4592 
4593 
4594 
4595 
4596 /*************************************************************************
4597 This function serializes data structure to C++ stream.
4598 
4599 The data stream generated by this function is the same as the string
4600 representation generated by the string version of the serializer:
4601 alphanumeric characters, dots, underscores and minus signs, grouped into
4602 words separated by spaces and CR+LF.
4603 
4604 We recommend reading the comments on the string version of the serializer
4605 to find out more about serialization of ALGLIB objects.
4606 *************************************************************************/
4607 void mlpeserialize(mlpensemble &obj, std::ostream &s_out);
4608 
4609 
4610 /*************************************************************************
4611 This function unserializes data structure from stream.
4612 *************************************************************************/
4613 void mlpeunserialize(const std::istream &s_in, mlpensemble &obj);
4614 
4615 
4616 /*************************************************************************
4617 Like MLPCreate0, but for ensembles.
4618 
4619  -- ALGLIB --
4620  Copyright 18.02.2009 by Bochkanov Sergey
4621 *************************************************************************/
4622 void mlpecreate0(const ae_int_t nin, const ae_int_t nout, const ae_int_t ensemblesize, mlpensemble &ensemble);
4623 
4624 
4625 /*************************************************************************
4626 Like MLPCreate1, but for ensembles.
4627 
4628  -- ALGLIB --
4629  Copyright 18.02.2009 by Bochkanov Sergey
4630 *************************************************************************/
4631 void mlpecreate1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, const ae_int_t ensemblesize, mlpensemble &ensemble);
4632 
4633 
4634 /*************************************************************************
4635 Like MLPCreate2, but for ensembles.
4636 
4637  -- ALGLIB --
4638  Copyright 18.02.2009 by Bochkanov Sergey
4639 *************************************************************************/
4640 void mlpecreate2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, const ae_int_t ensemblesize, mlpensemble &ensemble);
4641 
4642 
4643 /*************************************************************************
4644 Like MLPCreateB0, but for ensembles.
4645 
4646  -- ALGLIB --
4647  Copyright 18.02.2009 by Bochkanov Sergey
4648 *************************************************************************/
4649 void mlpecreateb0(const ae_int_t nin, const ae_int_t nout, const double b, const double d, const ae_int_t ensemblesize, mlpensemble &ensemble);
4650 
4651 
4652 /*************************************************************************
4653 Like MLPCreateB1, but for ensembles.
4654 
4655  -- ALGLIB --
4656  Copyright 18.02.2009 by Bochkanov Sergey
4657 *************************************************************************/
4658 void mlpecreateb1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, const double b, const double d, const ae_int_t ensemblesize, mlpensemble &ensemble);
4659 
4660 
4661 /*************************************************************************
4662 Like MLPCreateB2, but for ensembles.
4663 
4664  -- ALGLIB --
4665  Copyright 18.02.2009 by Bochkanov Sergey
4666 *************************************************************************/
4667 void mlpecreateb2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, const double b, const double d, const ae_int_t ensemblesize, mlpensemble &ensemble);
4668 
4669 
4670 /*************************************************************************
4671 Like MLPCreateR0, but for ensembles.
4672 
4673  -- ALGLIB --
4674  Copyright 18.02.2009 by Bochkanov Sergey
4675 *************************************************************************/
4676 void mlpecreater0(const ae_int_t nin, const ae_int_t nout, const double a, const double b, const ae_int_t ensemblesize, mlpensemble &ensemble);
4677 
4678 
4679 /*************************************************************************
4680 Like MLPCreateR1, but for ensembles.
4681 
4682  -- ALGLIB --
4683  Copyright 18.02.2009 by Bochkanov Sergey
4684 *************************************************************************/
4685 void mlpecreater1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, const double a, const double b, const ae_int_t ensemblesize, mlpensemble &ensemble);
4686 
4687 
4688 /*************************************************************************
4689 Like MLPCreateR2, but for ensembles.
4690 
4691  -- ALGLIB --
4692  Copyright 18.02.2009 by Bochkanov Sergey
4693 *************************************************************************/
4694 void mlpecreater2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, const double a, const double b, const ae_int_t ensemblesize, mlpensemble &ensemble);
4695 
4696 
4697 /*************************************************************************
4698 Like MLPCreateC0, but for ensembles.
4699 
4700  -- ALGLIB --
4701  Copyright 18.02.2009 by Bochkanov Sergey
4702 *************************************************************************/
4703 void mlpecreatec0(const ae_int_t nin, const ae_int_t nout, const ae_int_t ensemblesize, mlpensemble &ensemble);
4704 
4705 
4706 /*************************************************************************
4707 Like MLPCreateC1, but for ensembles.
4708 
4709  -- ALGLIB --
4710  Copyright 18.02.2009 by Bochkanov Sergey
4711 *************************************************************************/
4712 void mlpecreatec1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, const ae_int_t ensemblesize, mlpensemble &ensemble);
4713 
4714 
4715 /*************************************************************************
4716 Like MLPCreateC2, but for ensembles.
4717 
4718  -- ALGLIB --
4719  Copyright 18.02.2009 by Bochkanov Sergey
4720 *************************************************************************/
4721 void mlpecreatec2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, const ae_int_t ensemblesize, mlpensemble &ensemble);
4722 
4723 
4724 /*************************************************************************
4725 Creates ensemble from network. Only network geometry is copied.
4726 
4727  -- ALGLIB --
4728  Copyright 17.02.2009 by Bochkanov Sergey
4729 *************************************************************************/
4730 void mlpecreatefromnetwork(const multilayerperceptron &network, const ae_int_t ensemblesize, mlpensemble &ensemble);
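
/*************************************************************************
Sketch for MLPECreateFromNetwork(): a hypothetical example which clones
the geometry of an existing network (weights are not copied) into a
10-member ensemble. MLPCreate1() is declared elsewhere in this header.

    alglib::multilayerperceptron net;
    alglib::mlpcreate1(2, 5, 1, net);   // 2 inputs, 5 hidden, 1 output
    alglib::mlpensemble ens;
    alglib::mlpecreatefromnetwork(net, 10, ens);
*************************************************************************/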
4731 
4732 
4733 /*************************************************************************
4734 Randomization of MLP ensemble
4735 
4736  -- ALGLIB --
4737  Copyright 17.02.2009 by Bochkanov Sergey
4738 *************************************************************************/
4739 void mlperandomize(const mlpensemble &ensemble);
4740 
4741 
4742 /*************************************************************************
4743 Return ensemble properties (number of inputs and outputs).
4744 
4745  -- ALGLIB --
4746  Copyright 17.02.2009 by Bochkanov Sergey
4747 *************************************************************************/
4748 void mlpeproperties(const mlpensemble &ensemble, ae_int_t &nin, ae_int_t &nout);
4749 
4750 
4751 /*************************************************************************
4752 Return normalization type (whether ensemble is SOFTMAX-normalized or not).
4753 
4754  -- ALGLIB --
4755  Copyright 17.02.2009 by Bochkanov Sergey
4756 *************************************************************************/
4757 bool mlpeissoftmax(const mlpensemble &ensemble);
4758 
4759 
4760 /*************************************************************************
4761 Processing
4762 
4763 INPUT PARAMETERS:
4764  Ensemble- neural networks ensemble
4765  X - input vector, array[0..NIn-1].
4766  Y - (possibly) preallocated buffer; if size of Y is less than
4767  NOut, it will be reallocated. If it is large enough, it
4768  is NOT reallocated, so we can save some time on reallocation.
4769 
4770 
4771 OUTPUT PARAMETERS:
4772  Y - result. Regression estimate when solving regression task,
4773  vector of posterior probabilities for classification task.
4774 
4775  -- ALGLIB --
4776  Copyright 17.02.2009 by Bochkanov Sergey
4777 *************************************************************************/
4778 void mlpeprocess(const mlpensemble &ensemble, const real_1d_array &x, real_1d_array &y);
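
/*************************************************************************
Sketch for MLPEProcess(): a hypothetical call on a 2-input ensemble ens
built as in the sketches above; input values are illustrative. Y is
reused between calls to avoid reallocation.

    alglib::real_1d_array x("[0.1,0.2]");
    alglib::real_1d_array y;
    alglib::mlpeprocess(ens, x, y);   // y[0..NOut-1] holds the result
*************************************************************************/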
4779 
4780 
4781 /*************************************************************************
4782 'interactive' variant of MLPEProcess for languages like Python which
4783 support constructs like "Y = MLPEProcess(LM,X)" and interactive mode of the
4784 interpreter
4785 
4786 This function allocates new array on each call, so it is significantly
4787 slower than its 'non-interactive' counterpart, but it is more convenient
4788 when you call it from command line.
4789 
4790  -- ALGLIB --
4791  Copyright 17.02.2009 by Bochkanov Sergey
4792 *************************************************************************/
4793 void mlpeprocessi(const mlpensemble &ensemble, const real_1d_array &x, real_1d_array &y);
4794 
4795 
4796 /*************************************************************************
4797 Relative classification error on the test set
4798 
4799 INPUT PARAMETERS:
4800  Ensemble- ensemble
4801  XY - test set
4802  NPoints - test set size
4803 
4804 RESULT:
4805  percent of incorrectly classified cases.
4806  Works both for classifier networks and for regression networks which
4807 are used as classifiers.
4808 
4809  -- ALGLIB --
4810  Copyright 17.02.2009 by Bochkanov Sergey
4811 *************************************************************************/
4812 double mlperelclserror(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints);
4813 
4814 
4815 /*************************************************************************
4816 Average cross-entropy (in bits per element) on the test set
4817 
4818 INPUT PARAMETERS:
4819  Ensemble- ensemble
4820  XY - test set
4821  NPoints - test set size
4822 
4823 RESULT:
4824  CrossEntropy/(NPoints*LN(2)).
4825  Zero if ensemble solves regression task.
4826 
4827  -- ALGLIB --
4828  Copyright 17.02.2009 by Bochkanov Sergey
4829 *************************************************************************/
4830 double mlpeavgce(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints);
4831 
4832 
4833 /*************************************************************************
4834 RMS error on the test set
4835 
4836 INPUT PARAMETERS:
4837  Ensemble- ensemble
4838  XY - test set
4839  NPoints - test set size
4840 
4841 RESULT:
4842  root mean square error.
4843  Its meaning for regression tasks is obvious. For classification tasks,
4844 RMS error means error when estimating posterior probabilities.
4845 
4846  -- ALGLIB --
4847  Copyright 17.02.2009 by Bochkanov Sergey
4848 *************************************************************************/
4849 double mlpermserror(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints);
4850 
4851 
4852 /*************************************************************************
4853 Average error on the test set
4854 
4855 INPUT PARAMETERS:
4856  Ensemble- ensemble
4857  XY - test set
4858  NPoints - test set size
4859 
4860 RESULT:
4861  Its meaning for regression tasks is obvious. For classification tasks,
4862 it means average error when estimating posterior probabilities.
4863 
4864  -- ALGLIB --
4865  Copyright 17.02.2009 by Bochkanov Sergey
4866 *************************************************************************/
4867 double mlpeavgerror(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints);
4868 
4869 
4870 /*************************************************************************
4871 Average relative error on the test set
4872 
4873 INPUT PARAMETERS:
4874  Ensemble- ensemble
4875  XY - test set
4876  NPoints - test set size
4877 
4878 RESULT:
4879  Its meaning for regression tasks is obvious. For classification tasks,
4880 it means average relative error when estimating posterior probabilities.
4881 
4882  -- ALGLIB --
4883  Copyright 17.02.2009 by Bochkanov Sergey
4884 *************************************************************************/
4885 double mlpeavgrelerror(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints);
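
/*************************************************************************
Sketch for the error metrics above: a hypothetical test set for a 2-input
classifier ensemble ens (last column holds the class number). All five
metrics share the same (ensemble, xy, npoints) calling convention.

    alglib::real_2d_array xy("[[0.1,0.2,0],[0.8,0.7,1]]");
    double rel = alglib::mlperelclserror(ens, xy, 2);
    double rms = alglib::mlpermserror(ens, xy, 2);
*************************************************************************/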
4886 
4887 /*************************************************************************
4888 Neural network training using modified Levenberg-Marquardt with exact
4889 Hessian calculation and regularization. Subroutine trains neural network
4890 with restarts from random positions. Algorithm is well suited for small
4891 and medium scale problems (hundreds of weights).
4892 
4893 INPUT PARAMETERS:
4894  Network - neural network with initialized geometry
4895  XY - training set
4896  NPoints - training set size
4897  Decay - weight decay constant, >=0.001
4898  Decay term 'Decay*||Weights||^2' is added to error
4899  function.
4900  If you don't know what Decay to choose, use 0.001.
4901  Restarts - number of restarts from random position, >0.
4902  If you don't know what Restarts to choose, use 2.
4903 
4904 OUTPUT PARAMETERS:
4905  Network - trained neural network.
4906  Info - return code:
4907  * -9, if internal matrix inverse subroutine failed
4908  * -2, if there is a point with class number
4909  outside of [0..NOut-1].
4910  * -1, if wrong parameters specified
4911  (NPoints<0, Restarts<1).
4912  * 2, if task has been solved.
4913  Rep - training report
4914 
4915  -- ALGLIB --
4916  Copyright 10.03.2009 by Bochkanov Sergey
4917 *************************************************************************/
4918 void mlptrainlm(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, ae_int_t &info, mlpreport &rep);
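
/*************************************************************************
Sketch for MLPTrainLM(): a hypothetical XOR-style regression setup using
the recommended Decay=0.001 and Restarts=2; dataset values are
illustrative. MLPCreate1() is declared elsewhere in this header.

    alglib::multilayerperceptron net;
    alglib::mlpcreate1(2, 5, 1, net);
    alglib::real_2d_array xy("[[0,0,0],[0,1,1],[1,0,1],[1,1,0]]");
    alglib::ae_int_t info;
    alglib::mlpreport rep;
    alglib::mlptrainlm(net, xy, 4, 0.001, 2, info, rep);
    // info=2 means the task has been solved (see return codes above)
*************************************************************************/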
4919 
4920 
4921 /*************************************************************************
4922 Neural network training using L-BFGS algorithm with regularization.
4923 Subroutine trains neural network with restarts from random positions.
4924 Algorithm is well suited for problems of any dimensionality (memory
4925 requirements and step complexity are linear by weights number).
4926 
4927 INPUT PARAMETERS:
4928  Network - neural network with initialized geometry
4929  XY - training set
4930  NPoints - training set size
4931  Decay - weight decay constant, >=0.001
4932  Decay term 'Decay*||Weights||^2' is added to error
4933  function.
4934  If you don't know what Decay to choose, use 0.001.
4935  Restarts - number of restarts from random position, >0.
4936  If you don't know what Restarts to choose, use 2.
4937  WStep - stopping criterion. Algorithm stops if step size is
4938  less than WStep. Recommended value - 0.01. Zero step
4939  size means stopping after MaxIts iterations.
4940  MaxIts - stopping criterion. Algorithm stops after MaxIts
4941  iterations (NOT gradient calculations). Zero MaxIts
4942  means stopping when step is sufficiently small.
4943 
4944 OUTPUT PARAMETERS:
4945  Network - trained neural network.
4946  Info - return code:
4947  * -8, if both WStep=0 and MaxIts=0
4948  * -2, if there is a point with class number
4949  outside of [0..NOut-1].
4950  * -1, if wrong parameters specified
4951  (NPoints<0, Restarts<1).
4952  * 2, if task has been solved.
4953  Rep - training report
4954 
4955  -- ALGLIB --
4956  Copyright 09.12.2007 by Bochkanov Sergey
4957 *************************************************************************/
4958 void mlptrainlbfgs(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, const double wstep, const ae_int_t maxits, ae_int_t &info, mlpreport &rep);
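
/*************************************************************************
Sketch for MLPTrainLBFGS(): the same hypothetical setup as in the
MLPTrainLM() sketch, adding the two stopping criteria (WStep=0.01,
MaxIts=0 stops on sufficiently small steps).

    alglib::mlptrainlbfgs(net, xy, 4, 0.001, 2, 0.01, 0, info, rep);
*************************************************************************/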
4959 
4960 
4961 /*************************************************************************
4962 Neural network training using early stopping (base algorithm - L-BFGS with
4963 regularization).
4964 
4965 INPUT PARAMETERS:
4966  Network - neural network with initialized geometry
4967  TrnXY - training set
4968  TrnSize - training set size, TrnSize>0
4969  ValXY - validation set
4970  ValSize - validation set size, ValSize>0
4971  Decay - weight decay constant, >=0.001
4972  Decay term 'Decay*||Weights||^2' is added to error
4973  function.
4974  If you don't know what Decay to choose, use 0.001.
4975  Restarts - number of restarts, either:
4976  * strictly positive number - algorithm makes the specified
4977  number of restarts from random positions.
4978  * -1, in which case algorithm makes exactly one run
4979  from the initial state of the network (no randomization).
4980  If you don't know what Restarts to choose, choose one
4981  of the following:
4982  * -1 (deterministic start)
4983  * +1 (one random restart)
4984  * +5 (moderate amount of random restarts)
4985 
4986 OUTPUT PARAMETERS:
4987  Network - trained neural network.
4988  Info - return code:
4989  * -2, if there is a point with class number
4990  outside of [0..NOut-1].
4991  * -1, if wrong parameters specified
4992  (NPoints<0, Restarts<1, ...).
4993  * 2, task has been solved, stopping criterion met -
4994  sufficiently small step size. Not expected (we
4995  use EARLY stopping) but possible and not an
4996  error.
4997  * 6, task has been solved, stopping criterion met -
4998  increasing of validation set error.
4999  Rep - training report
5000 
5001 NOTE:
5002 
5003 Algorithm stops if validation set error increases for long enough, or if
5004 step size is small enough (there are tasks where validation set error may
5005 decrease for eternity). In any case, the solution returned corresponds to
5006 the minimum of validation set error.
5007 
5008  -- ALGLIB --
5009  Copyright 10.03.2009 by Bochkanov Sergey
5010 *************************************************************************/
5011 void mlptraines(const multilayerperceptron &network, const real_2d_array &trnxy, const ae_int_t trnsize, const real_2d_array &valxy, const ae_int_t valsize, const double decay, const ae_int_t restarts, ae_int_t &info, mlpreport &rep);
5012 
5013 
5014 /*************************************************************************
5015 Cross-validation estimate of generalization error.
5016 
5017 Base algorithm - L-BFGS.
5018 
5019 INPUT PARAMETERS:
5020  Network - neural network with initialized geometry. Network is
5021  not changed during cross-validation - it is used only
5022  as a representative of its architecture.
5023  XY - training set.
5024  SSize - training set size
5025  Decay - weight decay, same as in MLPTrainLBFGS
5026  Restarts - number of restarts, >0.
5027  restarts are counted for each partition separately, so
5028  total number of restarts will be Restarts*FoldsCount.
5029  WStep - stopping criterion, same as in MLPTrainLBFGS
5030  MaxIts - stopping criterion, same as in MLPTrainLBFGS
5031  FoldsCount - number of folds in k-fold cross-validation,
5032  2<=FoldsCount<=SSize.
5033  recommended value: 10.
5034 
5035 OUTPUT PARAMETERS:
5036  Info - return code, same as in MLPTrainLBFGS
5037  Rep - report, same as in MLPTrainLM/MLPTrainLBFGS
5038  CVRep - generalization error estimates
5039 
5040  -- ALGLIB --
5041  Copyright 09.12.2007 by Bochkanov Sergey
5042 *************************************************************************/
5043 void mlpkfoldcvlbfgs(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, const double wstep, const ae_int_t maxits, const ae_int_t foldscount, ae_int_t &info, mlpreport &rep, mlpcvreport &cvrep);
5044 
5045 
5046 /*************************************************************************
5047 Cross-validation estimate of generalization error.
5048 
5049 Base algorithm - Levenberg-Marquardt.
5050 
5051 INPUT PARAMETERS:
5052  Network - neural network with initialized geometry. Network is
5053  not changed during cross-validation - it is used only
5054  as a representative of its architecture.
5055  XY - training set.
5056  SSize - training set size
5057  Decay - weight decay, same as in MLPTrainLBFGS
5058  Restarts - number of restarts, >0.
5059  restarts are counted for each partition separately, so
5060  total number of restarts will be Restarts*FoldsCount.
5061  FoldsCount - number of folds in k-fold cross-validation,
5062  2<=FoldsCount<=SSize.
5063  recommended value: 10.
5064 
5065 OUTPUT PARAMETERS:
5066  Info - return code, same as in MLPTrainLBFGS
5067  Rep - report, same as in MLPTrainLM/MLPTrainLBFGS
5068  CVRep - generalization error estimates
5069 
5070  -- ALGLIB --
5071  Copyright 09.12.2007 by Bochkanov Sergey
5072 *************************************************************************/
5073 void mlpkfoldcvlm(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, const ae_int_t foldscount, ae_int_t &info, mlpreport &rep, mlpcvreport &cvrep);
5074 
5075 
5076 /*************************************************************************
5077 This function estimates generalization error using cross-validation on the
5078 current dataset with current training settings.
5079 
5080 FOR USERS OF COMMERCIAL EDITION:
5081 
5082  ! Commercial version of ALGLIB includes two important improvements of
5083  ! this function:
5084  ! * multicore support (C++ and C# computational cores)
5085  ! * SSE support (C++ computational core)
5086  !
5087  ! Second improvement gives constant speedup (2-3X). First improvement
5088  ! gives close-to-linear speedup on multicore systems. Following
5089  ! operations can be executed in parallel:
5090  ! * FoldsCount cross-validation rounds (always)
5091  ! * NRestarts training sessions performed within each of
5092  ! cross-validation rounds (if NRestarts>1)
5093  ! * gradient calculation over large dataset (if dataset is large enough)
5094  !
5095  ! In order to use multicore features you have to:
5096  ! * use commercial version of ALGLIB
5097  ! * call this function with "smp_" prefix, which indicates that
5098  ! multicore code will be used (for multicore support)
5099  !
5100  ! In order to use SSE features you have to:
5101  ! * use commercial version of ALGLIB on Intel processors
5102  ! * use C++ computational core
5103  !
5104  ! This note is given for users of commercial edition; if you use GPL
5105  ! edition, you still will be able to call smp-version of this function,
5106  ! but all computations will be done serially.
5107  !
5108  ! We recommend you to carefully read ALGLIB Reference Manual, section
5109  ! called 'SMP support', before using parallel version of this function.
5110 
5111 INPUT PARAMETERS:
5112  S - trainer object
5113  Network - neural network. It must have same number of inputs and
5114  outputs/classes as was specified during creation of the
5115  trainer object. Network is not changed during cross-
5116  validation and is not trained - it is used only as
5117  representative of its architecture. I.e., we estimate
5118  generalization properties of ARCHITECTURE, not some
5119  specific network.
5120  NRestarts - number of restarts, >=0:
5121  * NRestarts>0 means that for each cross-validation
5122  round specified number of random restarts is
5123  performed, with best network being chosen after
5124  training.
5125  * NRestarts=0 is same as NRestarts=1
5126  FoldsCount - number of folds in k-fold cross-validation:
5127  * 2<=FoldsCount<=size of dataset
5128  * recommended value: 10.
5129  * values larger than dataset size will be silently
5130  truncated down to dataset size
5131 
5132 OUTPUT PARAMETERS:
5133  Rep - structure which contains cross-validation estimates:
5134  * Rep.RelCLSError - fraction of misclassified cases.
5135  * Rep.AvgCE - average cross-entropy
5136  * Rep.RMSError - root-mean-square error
5137  * Rep.AvgError - average error
5138  * Rep.AvgRelError - average relative error
5139 
5140 NOTE: when no dataset was specified with MLPSetDataset/SetSparseDataset(),
5141  or subset with only one point was given, zeros are returned as
5142  estimates.
5143 
5144 NOTE: this method performs FoldsCount cross-validation rounds, each one
5145  with NRestarts random starts. Thus, FoldsCount*NRestarts networks
5146  are trained in total.
5147 
5148 NOTE: Rep.RelCLSError/Rep.AvgCE are zero on regression problems.
5149 
5150 NOTE: on classification problems Rep.RMSError/Rep.AvgError/Rep.AvgRelError
5151  contain errors in prediction of posterior probabilities.
5152 
5153  -- ALGLIB --
5154  Copyright 23.07.2012 by Bochkanov Sergey
5155 *************************************************************************/
5156 void mlpkfoldcv(const mlptrainer &s, const multilayerperceptron &network, const ae_int_t nrestarts, const ae_int_t foldscount, mlpreport &rep);
5157 void smp_mlpkfoldcv(const mlptrainer &s, const multilayerperceptron &network, const ae_int_t nrestarts, const ae_int_t foldscount, mlpreport &rep);
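
/*************************************************************************
Sketch for MLPKFoldCV(): a hypothetical 10-fold run. It assumes a trainer
object trn prepared with MLPCreateTrainer()/MLPSetDataset() (declared
below) over a dataset with at least 10 points, and a compatible network
net.

    alglib::mlpreport rep;
    alglib::mlpkfoldcv(trn, net, 3, 10, rep);   // 3 restarts per round
    // rep.rmserror etc. now hold cross-validation estimates
*************************************************************************/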
5158 
5159 
5160 /*************************************************************************
5161 Creation of the network trainer object for regression networks
5162 
5163 INPUT PARAMETERS:
5164  NIn - number of inputs, NIn>=1
5165  NOut - number of outputs, NOut>=1
5166 
5167 OUTPUT PARAMETERS:
5168  S - neural network trainer object.
5169  This structure can be used to train any regression
5170  network with NIn inputs and NOut outputs.
5171 
5172  -- ALGLIB --
5173  Copyright 23.07.2012 by Bochkanov Sergey
5174 *************************************************************************/
5175 void mlpcreatetrainer(const ae_int_t nin, const ae_int_t nout, mlptrainer &s);
5176 
5177 
5178 /*************************************************************************
5179 Creation of the network trainer object for classification networks
5180 
5181 INPUT PARAMETERS:
5182  NIn - number of inputs, NIn>=1
5183  NClasses - number of classes, NClasses>=2
5184 
5185 OUTPUT PARAMETERS:
5186  S - neural network trainer object.
5187  This structure can be used to train any classification
5188  network with NIn inputs and NOut outputs.
5189 
5190  -- ALGLIB --
5191  Copyright 23.07.2012 by Bochkanov Sergey
5192 *************************************************************************/
5193 void mlpcreatetrainercls(const ae_int_t nin, const ae_int_t nclasses, mlptrainer &s);
5194 
5195 
5196 /*************************************************************************
5197 This function sets "current dataset" of the trainer object to one passed
5198 by user.
5199 
5200 INPUT PARAMETERS:
5201  S - trainer object
5202  XY - training set, see below for information on the
5203  training set format. This function checks correctness
5204  of the dataset (no NANs/INFs, class numbers are
5205  correct) and throws exception when incorrect dataset
5206  is passed.
5207  NPoints - points count, >=0.
5208 
5209 DATASET FORMAT:
5210 
5211 This function uses two different dataset formats - one for regression
5212 networks, another one for classification networks.
5213 
5214 For regression networks with NIn inputs and NOut outputs the following
5215 dataset format is used:
5216 * dataset is given by NPoints*(NIn+NOut) matrix
5217 * each row corresponds to one example
5218 * first NIn columns are inputs, next NOut columns are outputs
5219 
5220 For classification networks with NIn inputs and NClasses classes the
5221 following dataset format is used:
5222 * dataset is given by NPoints*(NIn+1) matrix
5223 * each row corresponds to one example
5224 * first NIn columns are inputs, last column stores class number (from 0 to
5225  NClasses-1).
5226 
5227  -- ALGLIB --
5228  Copyright 23.07.2012 by Bochkanov Sergey
5229 *************************************************************************/
5230 void mlpsetdataset(const mlptrainer &s, const real_2d_array &xy, const ae_int_t npoints);
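
/*************************************************************************
Dataset format sketch: hypothetical rows for both conventions described
above, with NIn=2; trn and clstrn are trainer objects created with
MLPCreateTrainer() and MLPCreateTrainerCls() respectively.

    // regression, NOut=1: columns are [in0, in1, out0]
    alglib::real_2d_array xyr("[[0.1,0.2,0.7],[0.3,0.1,0.5]]");
    alglib::mlpsetdataset(trn, xyr, 2);
    // classification, NClasses=3: last column is the class number 0..2
    alglib::real_2d_array xyc("[[0.1,0.2,0],[0.3,0.1,2]]");
    alglib::mlpsetdataset(clstrn, xyc, 2);
*************************************************************************/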
5231 
5232 
5233 /*************************************************************************
5234 This function sets "current dataset" of the trainer object to one passed
5235 by user (sparse matrix is used to store dataset).
5236 
5237 INPUT PARAMETERS:
5238  S - trainer object
5239  XY - training set, see below for information on the
5240  training set format. This function checks correctness
5241  of the dataset (no NANs/INFs, class numbers are
5242  correct) and throws exception when incorrect dataset
5243  is passed. Any sparse storage format can be used:
5244  Hash-table, CRS...
5245  NPoints - points count, >=0
5246 
5247 DATASET FORMAT:
5248 
5249 This function uses two different dataset formats - one for regression
5250 networks, another one for classification networks.
5251 
5252 For regression networks with NIn inputs and NOut outputs the following
5253 dataset format is used:
5254 * dataset is given by NPoints*(NIn+NOut) matrix
5255 * each row corresponds to one example
5256 * first NIn columns are inputs, next NOut columns are outputs
5257 
5258 For classification networks with NIn inputs and NClasses classes the
5259 following dataset format is used:
5260 * dataset is given by NPoints*(NIn+1) matrix
5261 * each row corresponds to one example
5262 * first NIn columns are inputs, last column stores class number (from 0 to
5263  NClasses-1).
5264 
5265  -- ALGLIB --
5266  Copyright 23.07.2012 by Bochkanov Sergey
5267 *************************************************************************/
5268 void mlpsetsparsedataset(const mlptrainer &s, const sparsematrix &xy, const ae_int_t npoints);
5269 
5270 
5271 /*************************************************************************
5272 This function sets weight decay coefficient which is used for training.
5273 
5274 INPUT PARAMETERS:
5275  S - trainer object
5276  Decay - weight decay coefficient, >=0. Weight decay term
5277  'Decay*||Weights||^2' is added to error function. If
5278  you don't know what Decay to choose, use 1.0E-3.
5279  Weight decay can be set to zero, in this case network
5280  is trained without weight decay.
5281 
5282 NOTE: by default network uses some small nonzero value for weight decay.
5283 
5284  -- ALGLIB --
5285  Copyright 23.07.2012 by Bochkanov Sergey
5286 *************************************************************************/
5287 void mlpsetdecay(const mlptrainer &s, const double decay);
5288 
5289 
5290 /*************************************************************************
5291 This function sets stopping criteria for the optimizer.
5292 
5293 INPUT PARAMETERS:
5294  S - trainer object
5295  WStep - stopping criterion. Algorithm stops if step size is
5296  less than WStep. Recommended value - 0.01. Zero step
5297  size means stopping after MaxIts iterations.
5298  WStep>=0.
5299  MaxIts - stopping criterion. Algorithm stops after MaxIts
5300  epochs (full passes over entire dataset). Zero MaxIts
5301  means stopping when step is sufficiently small.
5302  MaxIts>=0.
5303 
5304 NOTE: by default, WStep=0.005 and MaxIts=0 are used. These values are also
5305  used when MLPSetCond() is called with WStep=0 and MaxIts=0.
5306 
5307 NOTE: these stopping criteria are used for all kinds of neural training -
5308  from "conventional" networks to early stopping ensembles. When used
5309  for "conventional" networks, they are used as the only stopping
5310  criteria. When combined with early stopping, they are used as ADDITIONAL
5311  stopping criteria which can terminate early stopping algorithm.
5312 
5313  -- ALGLIB --
5314  Copyright 23.07.2012 by Bochkanov Sergey
5315 *************************************************************************/
5316 void mlpsetcond(const mlptrainer &s, const double wstep, const ae_int_t maxits);
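
/*************************************************************************
Sketch for the two tuning calls above, using the values recommended in
the comments; trn is a hypothetical trainer object.

    alglib::mlpsetdecay(trn, 0.001);   // recommended weight decay
    alglib::mlpsetcond(trn, 0.01, 0);  // stop on steps smaller than 0.01
*************************************************************************/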
5317 
5318 
5319 /*************************************************************************
5320 This function sets training algorithm: batch training using L-BFGS will be
5321 used.
5322 
5323 This algorithm:
5324 * is the most robust for small-scale problems, but may be too slow for
5325  large-scale ones
5326 * performs a full pass through the dataset before performing a step
5327 * uses conditions specified by MLPSetCond() for stopping
5328 * is the default one used by the trainer object
5329 
5330 INPUT PARAMETERS:
5331  S - trainer object
5332 
5333  -- ALGLIB --
5334  Copyright 23.07.2012 by Bochkanov Sergey
5335 *************************************************************************/
5336 void mlpsetalgobatch(const mlptrainer &s);
5337 
5338 
5339 /*************************************************************************
5340 This function trains neural network passed to this function, using current
5341 dataset (one which was passed to MLPSetDataset() or MLPSetSparseDataset())
5342 and current training settings. Training from NRestarts random starting
5343 positions is performed, best network is chosen.
5344 
5345 Training is performed using current training algorithm.
5346 
5347 FOR USERS OF COMMERCIAL EDITION:
5348 
5349  ! Commercial version of ALGLIB includes two important improvements of
5350  ! this function:
5351  ! * multicore support (C++ and C# computational cores)
5352  ! * SSE support (C++ computational core)
5353  !
5354  ! Second improvement gives constant speedup (2-3X). First improvement
5355  ! gives close-to-linear speedup on multicore systems. Following
5356  ! operations can be executed in parallel:
5357  ! * NRestarts training sessions performed within each of
5358  ! cross-validation rounds (if NRestarts>1)
5359  ! * gradient calculation over large dataset (if dataset is large enough)
5360  !
5361  ! In order to use multicore features you have to:
5362  ! * use commercial version of ALGLIB
5363  ! * call this function with "smp_" prefix, which indicates that
5364  ! multicore code will be used (for multicore support)
5365  !
5366  ! In order to use SSE features you have to:
5367  ! * use commercial version of ALGLIB on Intel processors
5368  ! * use C++ computational core
5369  !
5370  ! This note is given for users of commercial edition; if you use GPL
5371  ! edition, you still will be able to call smp-version of this function,
5372  ! but all computations will be done serially.
5373  !
5374  ! We recommend you to carefully read ALGLIB Reference Manual, section
5375  ! called 'SMP support', before using parallel version of this function.
5376 
5377 INPUT PARAMETERS:
5378  S - trainer object
5379  Network - neural network. It must have same number of inputs and
5380  outputs/classes as was specified during creation of the
5381  trainer object.
5382  NRestarts - number of restarts, >=0:
5383  * NRestarts>0 means that specified number of random
5384  restarts are performed, best network is chosen after
5385  training
5386  * NRestarts=0 means that current state of the network
5387  is used for training.
5388 
5389 OUTPUT PARAMETERS:
5390  Network - trained network
5391 
5392 NOTE: when no dataset was specified with MLPSetDataset/SetSparseDataset(),
5393  network is filled by zero values. Same behavior for functions
5394  MLPStartTraining and MLPContinueTraining.
5395 
5396 NOTE: this method uses sum-of-squares error function for training.
5397 
5398  -- ALGLIB --
5399  Copyright 23.07.2012 by Bochkanov Sergey
5400 *************************************************************************/
5401 void mlptrainnetwork(const mlptrainer &s, const multilayerperceptron &network, const ae_int_t nrestarts, mlpreport &rep);
5402 void smp_mlptrainnetwork(const mlptrainer &s, const multilayerperceptron &network, const ae_int_t nrestarts, mlpreport &rep);
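
/*************************************************************************
Sketch for MLPTrainNetwork(): a hypothetical complete run on a trainer
object trn prepared as in the previous sketches (dataset already set).

    alglib::multilayerperceptron net;
    alglib::mlpcreate1(2, 5, 1, net);
    alglib::mlpreport rep;
    alglib::mlptrainnetwork(trn, net, 5, rep);  // best of 5 random starts
*************************************************************************/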
5403 
5404 
5405 /*************************************************************************
5406 IMPORTANT: this is an "expert" version of the MLPTrain() function. We do
5407  not recommend using it unless you are pretty sure that you
5408  need the ability to monitor training progress.
5409 
5410 This function performs step-by-step training of the neural network. Here
5411 "step-by-step" means that training starts with MLPStartTraining() call,
5412 and then user subsequently calls MLPContinueTraining() to perform one more
5413 iteration of the training.
5414 
5415 After call to this function trainer object remembers network and is ready
5416 to train it. However, no training is performed until first call to
5417 MLPContinueTraining() function. Subsequent calls to MLPContinueTraining()
5418 will advance training progress one iteration further.
5419 
5420 EXAMPLE:
5421  >
5422  > ...initialize network and trainer object....
5423  >
5424  > MLPStartTraining(Trainer, Network, True)
5425  > while MLPContinueTraining(Trainer, Network) do
5426  > ...visualize training progress...
5427  >
5428 
5429 INPUT PARAMETERS:
5430  S - trainer object
5431  Network - neural network. It must have same number of inputs and
5432  outputs/classes as was specified during creation of the
5433  trainer object.
5434  RandomStart - randomize network before training or not:
5435  * True means that network is randomized and its
5436  initial state (one which was passed to the trainer
5437  object) is lost.
5438  * False means that training is started from the
5439  current state of the network
5440 
5441 OUTPUT PARAMETERS:
5442  Network - neural network which is ready to training (weights are
5443  initialized, preprocessor is initialized using current
5444  training set)
5445 
5446 NOTE: this method uses sum-of-squares error function for training.
5447 
5448 NOTE: it is expected that trainer object settings are NOT changed during
5449  step-by-step training, i.e. no one changes stopping criteria or
5450  training set during training. It is possible and there is no defense
5451  against such actions, but algorithm behavior in such cases is
5452  undefined and can be unpredictable.
5453 
5454  -- ALGLIB --
5455  Copyright 23.07.2012 by Bochkanov Sergey
5456 *************************************************************************/
5457 void mlpstarttraining(const mlptrainer &s, const multilayerperceptron &network, const bool randomstart);
5458 
5459 
5460 /*************************************************************************
5461 IMPORTANT: this is an "expert" version of the MLPTrain() function. We do
5462  not recommend using it unless you are pretty sure that you
5463  need the ability to monitor training progress.
5464 
5465 FOR USERS OF COMMERCIAL EDITION:
5466 
5467  ! Commercial version of ALGLIB includes two important improvements of
5468  ! this function:
5469  ! * multicore support (C++ and C# computational cores)
5470  ! * SSE support (C++ computational core)
5471  !
5472  ! Second improvement gives constant speedup (2-3X). First improvement
5473  ! gives close-to-linear speedup on multicore systems. Following
5474  ! operations can be executed in parallel:
5475  ! * gradient calculation over large dataset (if dataset is large enough)
5476  !
5477  ! In order to use multicore features you have to:
5478  ! * use commercial version of ALGLIB
5479  ! * call this function with "smp_" prefix, which indicates that
5480  ! multicore code will be used (for multicore support)
5481  !
5482  ! In order to use SSE features you have to:
5483  ! * use commercial version of ALGLIB on Intel processors
5484  ! * use C++ computational core
5485  !
5486  ! This note is given for users of commercial edition; if you use GPL
5487  ! edition, you still will be able to call smp-version of this function,
5488  ! but all computations will be done serially.
5489  !
5490  ! We recommend you to carefully read ALGLIB Reference Manual, section
5491  ! called 'SMP support', before using parallel version of this function.
5492 
5493 This function performs step-by-step training of the neural network. Here
5494 "step-by-step" means that training starts with MLPStartTraining() call,
5495 and then user subsequently calls MLPContinueTraining() to perform one more
5496 iteration of the training.
5497 
5498 This function performs one more iteration of the training and returns
5499 either True (training continues) or False (training stopped). In case True
5500 was returned, Network weights are updated according to the current state
5501 of the optimization progress. In case False was returned, no additional
5502 updates are performed (the previous update of the network weights moved us
5503 to the final point, and no additional updates are needed).
5504 
5505 EXAMPLE:
5506  >
5507  > [initialize network and trainer object]
5508  >
5509  > MLPStartTraining(Trainer, Network, True)
5510  > while MLPContinueTraining(Trainer, Network) do
5511  > [visualize training progress]
5512  >
5513 
5514 INPUT PARAMETERS:
5515  S - trainer object
5516  Network - neural network structure, which is used to store
5517  current state of the training process.
5518 
5519 OUTPUT PARAMETERS:
5520  Network - weights of the neural network are rewritten by the
5521  current approximation.
5522 
5523 NOTE: this method uses sum-of-squares error function for training.
5524 
5525 NOTE: it is expected that trainer object settings are NOT changed during
5526  step-by-step training, i.e. no one changes stopping criteria or
5527  training set during training. It is possible and there is no defense
5528  against such actions, but algorithm behavior in such cases is
5529  undefined and can be unpredictable.
5530 
5531 NOTE: It is expected that Network is the same one which was passed to
5532  MLPStartTraining() function. However, THIS function checks only
5533  following:
5534  * that number of network inputs is consistent with trainer object
5535  settings
5536  * that number of network outputs/classes is consistent with trainer
5537  object settings
5538  * that number of network weights is the same as number of weights in
5539  the network passed to MLPStartTraining() function
5540  Exception is thrown when these conditions are violated.
5541 
5542  It is also expected that you do not change state of the network on
5543  your own - the only party who has the right to change the network
5544  during its training is the trainer object. Any attempt to interfere
5545  may lead to unpredictable results.
5546 
5547 
5548  -- ALGLIB --
5549  Copyright 23.07.2012 by Bochkanov Sergey
5550 *************************************************************************/
5551 bool mlpcontinuetraining(const mlptrainer &s, const multilayerperceptron &network);
5552 bool smp_mlpcontinuetraining(const mlptrainer &s, const multilayerperceptron &network);
5553 
5554 
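/*************************************************************************
Step-by-step sketch: a C++ rendering of the pseudocode EXAMPLE above;
trn and net are hypothetical, prepared as in the previous sketches.

    alglib::mlpstarttraining(trn, net, true);
    while(alglib::mlpcontinuetraining(trn, net))
    {
        // inspect net or report progress here, once per iteration
    }
*************************************************************************/
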
5555 /*************************************************************************
5556 Training neural networks ensemble using bootstrap aggregating (bagging).
5557 Modified Levenberg-Marquardt algorithm is used as base training method.
5558 
5559 INPUT PARAMETERS:
5560  Ensemble - model with initialized geometry
5561  XY - training set
5562  NPoints - training set size
5563  Decay - weight decay coefficient, >=0.001
5564  Restarts - restarts, >0.
5565 
5566 OUTPUT PARAMETERS:
5567  Ensemble - trained model
5568  Info - return code:
5569  * -2, if there is a point with class number
5570  outside of [0..NClasses-1].
5571  * -1, if incorrect parameters were passed
5572  (NPoints<0, Restarts<1).
5573  * 2, if task has been solved.
5574  Rep - training report.
5575  OOBErrors - out-of-bag generalization error estimate
5576 
5577  -- ALGLIB --
5578  Copyright 17.02.2009 by Bochkanov Sergey
5579 *************************************************************************/
5580 void mlpebagginglm(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, ae_int_t &info, mlpreport &rep, mlpcvreport &ooberrors);
5581 
5582 
5583 /*************************************************************************
5584 Training neural networks ensemble using bootstrap aggregating (bagging).
5585 L-BFGS algorithm is used as base training method.
5586 
5587 INPUT PARAMETERS:
5588  Ensemble - model with initialized geometry
5589  XY - training set
5590  NPoints - training set size
5591  Decay - weight decay coefficient, >=0.001
5592  Restarts - restarts, >0.
5593  WStep - stopping criterion, same as in MLPTrainLBFGS
5594  MaxIts - stopping criterion, same as in MLPTrainLBFGS
5595 
5596 OUTPUT PARAMETERS:
5597  Ensemble - trained model
5598  Info - return code:
5599  * -8, if both WStep=0 and MaxIts=0
5600  * -2, if there is a point with class number
5601  outside of [0..NClasses-1].
5602  * -1, if incorrect parameters were passed
5603  (NPoints<0, Restarts<1).
5604  * 2, if task has been solved.
5605  Rep - training report.
5606  OOBErrors - out-of-bag generalization error estimate
5607 
5608  -- ALGLIB --
5609  Copyright 17.02.2009 by Bochkanov Sergey
5610 *************************************************************************/
5611 void mlpebagginglbfgs(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, const double wstep, const ae_int_t maxits, ae_int_t &info, mlpreport &rep, mlpcvreport &ooberrors);
5612 
5613 
5614 /*************************************************************************
5615 Training neural networks ensemble using early stopping.
5616 
5617 INPUT PARAMETERS:
5618  Ensemble - model with initialized geometry
5619  XY - training set
5620  NPoints - training set size
5621  Decay - weight decay coefficient, >=0.001
5622  Restarts - restarts, >0.
5623 
5624 OUTPUT PARAMETERS:
5625  Ensemble - trained model
5626  Info - return code:
5627  * -2, if there is a point with class number
5628  outside of [0..NClasses-1].
5629  * -1, if incorrect parameters were passed
5630  (NPoints<0, Restarts<1).
5631  * 6, if task has been solved.
5632  Rep - training report.
5633  OOBErrors - out-of-bag generalization error estimate
5634 
5635  -- ALGLIB --
5636  Copyright 10.03.2009 by Bochkanov Sergey
5637 *************************************************************************/
5638 void mlpetraines(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, ae_int_t &info, mlpreport &rep);
5639 
5640 
5641 /*************************************************************************
5642 This function trains neural network ensemble passed to this function using
5643 current dataset and early stopping training algorithm. Each early stopping
5644 round performs NRestarts random restarts (thus, EnsembleSize*NRestarts
5645 training rounds are performed in total).
5646 
5647 FOR USERS OF COMMERCIAL EDITION:
5648 
5649  ! Commercial version of ALGLIB includes two important improvements of
5650  ! this function:
5651  ! * multicore support (C++ and C# computational cores)
5652  ! * SSE support (C++ computational core)
5653  !
5654  ! Second improvement gives constant speedup (2-3X). First improvement
5655  ! gives close-to-linear speedup on multicore systems. Following
5656  ! operations can be executed in parallel:
5657  ! * EnsembleSize training sessions performed for each of ensemble
5658  ! members (always parallelized)
5659  ! * NRestarts training sessions performed within each of training
5660  ! sessions (if NRestarts>1)
5661  ! * gradient calculation over large dataset (if dataset is large enough)
5662  !
5663  ! In order to use multicore features you have to:
5664  ! * use commercial version of ALGLIB
5665  ! * call this function with "smp_" prefix, which indicates that
5666  ! multicore code will be used (for multicore support)
5667  !
5668  ! In order to use SSE features you have to:
5669  ! * use commercial version of ALGLIB on Intel processors
5670  ! * use C++ computational core
5671  !
5672  ! This note is given for users of commercial edition; if you use GPL
5673  ! edition, you still will be able to call smp-version of this function,
5674  ! but all computations will be done serially.
5675  !
5676  ! We recommend you to carefully read ALGLIB Reference Manual, section
5677  ! called 'SMP support', before using parallel version of this function.
5678 
5679 INPUT PARAMETERS:
5680  S - trainer object;
5681  Ensemble - neural network ensemble. It must have same number of
5682  inputs and outputs/classes as was specified during
5683  creation of the trainer object.
5684  NRestarts - number of restarts, >=0:
5685  * NRestarts>0 means that specified number of random
5686  restarts are performed during each ES round;
5687  * NRestarts=0 is silently replaced by 1.
5688 
5689 OUTPUT PARAMETERS:
5690  Ensemble - trained ensemble;
5691  Rep - it contains all types of errors.
5692 
5693 NOTE: this training method uses BOTH early stopping and weight decay! So,
5694  you should select weight decay before starting training just as you
5695  select it before training "conventional" networks.
5696 
5697 NOTE: when no dataset was specified with MLPSetDataset/SetSparseDataset(),
5698  or single-point dataset was passed, ensemble is filled by zero
5699  values.
5700 
5701 NOTE: this method uses sum-of-squares error function for training.
5702 
5703  -- ALGLIB --
5704  Copyright 22.08.2012 by Bochkanov Sergey
5705 *************************************************************************/
5706 void mlptrainensemblees(const mlptrainer &s, const mlpensemble &ensemble, const ae_int_t nrestarts, mlpreport &rep);
5707 void smp_mlptrainensemblees(const mlptrainer &s, const mlpensemble &ensemble, const ae_int_t nrestarts, mlpreport &rep);
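
/*************************************************************************
Sketch for MLPTrainEnsembleES(): a hypothetical run which trains a
10-member ensemble on a prepared trainer object trn, with 3 random
restarts per early stopping round.

    alglib::mlpensemble ens;
    alglib::mlpecreate1(2, 5, 1, 10, ens);
    alglib::mlpreport rep;
    alglib::mlptrainensemblees(trn, ens, 3, rep);
*************************************************************************/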
5708 
5709 /*************************************************************************
5710 This function initializes the clusterizer object. A newly initialized
5711 object is empty, i.e. it does not contain a dataset. Use it as follows:
5712 1. creation
5713 2. dataset is added with ClusterizerSetPoints()
5714 3. additional parameters are set
5715 4. clusterization is performed with one of the clustering functions
5716 
5717  -- ALGLIB --
5718  Copyright 10.07.2012 by Bochkanov Sergey
5719 *************************************************************************/
5720 void clusterizercreate(clusterizerstate &s);
5721 
5722 
5723 /*************************************************************************
5724 This function adds dataset to the clusterizer structure.
5725 
5726 This function overrides all previous calls of ClusterizerSetPoints() or
5727 ClusterizerSetDistances().
5728 
5729 INPUT PARAMETERS:
5730  S - clusterizer state, initialized by ClusterizerCreate()
5731  XY - array[NPoints,NFeatures], dataset
5732  NPoints - number of points, >=0
5733  NFeatures- number of features, >=1
5734  DistType- distance function:
5735  * 0 Chebyshev distance (L-inf norm)
5736  * 1 city block distance (L1 norm)
5737  * 2 Euclidean distance (L2 norm), non-squared
5738  * 10 Pearson correlation:
5739  dist(a,b) = 1-corr(a,b)
5740  * 11 Absolute Pearson correlation:
5741  dist(a,b) = 1-|corr(a,b)|
5742  * 12 Uncentered Pearson correlation (cosine of the angle):
5743  dist(a,b) = a'*b/(|a|*|b|)
5744  * 13 Absolute uncentered Pearson correlation
5745  dist(a,b) = |a'*b|/(|a|*|b|)
5746  * 20 Spearman rank correlation:
5747  dist(a,b) = 1-rankcorr(a,b)
5748  * 21 Absolute Spearman rank correlation
5749  dist(a,b) = 1-|rankcorr(a,b)|
5750 
5751 NOTE 1: different distance functions have different performance penalty:
5752  * Euclidean or Pearson correlation distances are the fastest ones
5753  * Spearman correlation distance function is a bit slower
5754  * city block and Chebyshev distances are an order of magnitude slower
5755 
5756  The reason behind the difference in performance is that correlation-
5757  based distance functions are computed using optimized linear algebra
5758  kernels, while Chebyshev and city block distance functions are computed
5759  using simple nested loops with two branches at each iteration.
5760 
5761 NOTE 2: different clustering algorithms have different limitations:
5762  * agglomerative hierarchical clustering algorithms may be used with
5763  any kind of distance metric
5764  * k-means++ clustering algorithm may be used only with Euclidean
5765  distance function
5766  Thus, the list of clustering algorithms you may use depends on the
5767  distance function you specify when you set your dataset.
5768 
5769  -- ALGLIB --
5770  Copyright 10.07.2012 by Bochkanov Sergey
5771 *************************************************************************/
5772 void clusterizersetpoints(const clusterizerstate &s, const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nfeatures, const ae_int_t disttype);
5773 void clusterizersetpoints(const clusterizerstate &s, const real_2d_array &xy, const ae_int_t disttype);
5774 
5775 
5776 /*************************************************************************
5777 This function adds a dataset given by its distance matrix to the
5778 clusterizer structure. Note that the dataset is not given explicitly -
5779 only the distance matrix is supplied.
5780 
5781 This function overrides all previous calls of ClusterizerSetPoints() or
5782 ClusterizerSetDistances().
5783 
5784 INPUT PARAMETERS:
5785  S - clusterizer state, initialized by ClusterizerCreate()
5786  D - array[NPoints,NPoints], distance matrix given by its upper
5787  or lower triangle (main diagonal is ignored because its
5788  entries are expected to be zero).
5789  NPoints - number of points
5790  IsUpper - whether upper or lower triangle of D is given.
5791 
5792 NOTE 1: different clustering algorithms have different limitations:
5793  * agglomerative hierarchical clustering algorithms may be used with
5794  any kind of distance metric, including one which is given by
5795  distance matrix
5796  * k-means++ clustering algorithm may be used only with Euclidean
5797  distance function and explicitly given points - it cannot be
5798  used with a dataset given by a distance matrix.
5799  Thus, if you call this function, you will be unable to use the
5800  k-means clustering algorithm to process your problem.
5801 
5802  -- ALGLIB --
5803  Copyright 10.07.2012 by Bochkanov Sergey
5804 *************************************************************************/
5805 void clusterizersetdistances(const clusterizerstate &s, const real_2d_array &d, const ae_int_t npoints, const bool isupper);
5806 void clusterizersetdistances(const clusterizerstate &s, const real_2d_array &d, const bool isupper);
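A minimal sketch (an illustrative annotation): clustering from a user-supplied distance matrix, with only the upper triangle of D filled in.

    #include "dataanalysis.h"

    int main()
    {
        alglib::clusterizerstate s;
        alglib::ahcreport rep;
        // 3x3 distance matrix; only the upper triangle is read (IsUpper=true),
        // the main diagonal is ignored
        alglib::real_2d_array d = "[[0,1,3],[0,0,2],[0,0,0]]";

        alglib::clusterizercreate(s);
        alglib::clusterizersetdistances(s, d, 3, true);
        alglib::clusterizerrunahc(s, rep); // k-means is NOT available in this mode
        return 0;
    }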
5807 
5808 
5809 /*************************************************************************
5810 This function sets the agglomerative hierarchical clustering algorithm
5811 
5812 INPUT PARAMETERS:
5813  S - clusterizer state, initialized by ClusterizerCreate()
5814  Algo - algorithm type:
5815  * 0 complete linkage (default algorithm)
5816  * 1 single linkage
5817  * 2 unweighted average linkage
5818  * 3 weighted average linkage
5819  * 4 Ward's method
5820 
5821 NOTE: Ward's method works correctly only with Euclidean distance, so the
5822  algorithm will return a negative termination code (failure) for
5823  any other distance type.
5824 
5825  It is possible, however, to use this method with a user-supplied
5826  distance matrix. It is your responsibility to pass one which was
5827  calculated with the Euclidean distance function.
5828 
5829  -- ALGLIB --
5830  Copyright 10.07.2012 by Bochkanov Sergey
5831 *************************************************************************/
5832 void clusterizersetahcalgo(const clusterizerstate &s, const ae_int_t algo);
5833 
5834 
5835 /*************************************************************************
5836 This function sets k-means properties: number of restarts and maximum
5837 number of iterations per one run.
5838 
5839 INPUT PARAMETERS:
5840  S - clusterizer state, initialized by ClusterizerCreate()
5841  Restarts- restarts count, >=1.
5842  The k-means++ algorithm performs several restarts and
5843  chooses the best set of centers (minimum squared distance).
5844  MaxIts - maximum number of k-means iterations performed during one
5845  run, >=0. Zero means that the algorithm performs an
5846  unlimited number of iterations.
5847 
5848  -- ALGLIB --
5849  Copyright 10.07.2012 by Bochkanov Sergey
5850 *************************************************************************/
5851 void clusterizersetkmeanslimits(const clusterizerstate &s, const ae_int_t restarts, const ae_int_t maxits);
5852 
5853 
5854 /*************************************************************************
5855 This function sets k-means initialization algorithm. Several different
5856 algorithms can be chosen, including k-means++.
5857 
5858 INPUT PARAMETERS:
5859  S - clusterizer state, initialized by ClusterizerCreate()
5860  InitAlgo- initialization algorithm:
5861  * 0 automatic selection (different versions of ALGLIB
5862  may select different algorithms)
5863  * 1 random initialization
5864  * 2 k-means++ initialization (best quality of initial
5865  centers, but long non-parallelizable initialization
5866  phase with bad cache locality)
5867  * 3 "fast-greedy" algorithm with an efficient, easy to
5868  parallelize initialization. Quality of initial centers
5869  is somewhat worse than that of k-means++. This
5870  algorithm is the default one in the current version of
5871  ALGLIB.
5872  *-1 "debug" algorithm which always selects the first K rows
5873  of the dataset; this algorithm is used for debugging
5874  purposes only. Do not use it in production code!
5875 
5876  -- ALGLIB --
5877  Copyright 21.01.2015 by Bochkanov Sergey
5878 *************************************************************************/
5879 void clusterizersetkmeansinit(const clusterizerstate &s, const ae_int_t initalgo);
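A minimal configuration sketch (an illustrative annotation) combining this function with ClusterizerSetKMeansLimits() above:

    #include "dataanalysis.h"

    int main()
    {
        alglib::clusterizerstate s;
        alglib::clusterizercreate(s);
        alglib::clusterizersetkmeanslimits(s, 5, 0); // 5 restarts, unlimited iterations
        alglib::clusterizersetkmeansinit(s, 2);      // InitAlgo=2: k-means++ initialization
        return 0;
    }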
5880 
5881 
5882 /*************************************************************************
5883 This function performs agglomerative hierarchical clustering
5884 
5885 COMMERCIAL EDITION OF ALGLIB:
5886 
5887  ! Commercial version of ALGLIB includes two important improvements of
5888  ! this function, which can be used from C++ and C#:
5889  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
5890  ! * multicore support
5891  !
5892  ! Agglomerative hierarchical clustering algorithm has two phases:
5893  ! distance matrix calculation and clustering itself. Only first phase
5894  ! (distance matrix calculation) is accelerated by Intel MKL and multi-
5895  ! threading. Thus, acceleration is significant only for medium or high-
5896  ! dimensional problems.
5897  !
5898  ! We recommend you to read 'Working with commercial version' section of
5899  ! ALGLIB Reference Manual in order to find out how to use performance-
5900  ! related features provided by commercial edition of ALGLIB.
5901 
5902 INPUT PARAMETERS:
5903  S - clusterizer state, initialized by ClusterizerCreate()
5904 
5905 OUTPUT PARAMETERS:
5906  Rep - clustering results; see description of AHCReport
5907  structure for more information.
5908 
5909 NOTE 1: hierarchical clustering algorithms require large amounts of memory.
5910  In particular, this implementation needs sizeof(double)*NPoints^2
5911  bytes, which are used to store the distance matrix. When we work
5912  with a user-supplied matrix, this amount is doubled (we have to
5913  store the original matrix and work with its copy).
5914 
5915  For example, a problem with 10000 points would require 800 MB of
5916  RAM, even when working in a 1-dimensional space.
5917 
5918  -- ALGLIB --
5919  Copyright 10.07.2012 by Bochkanov Sergey
5920 *************************************************************************/
5921 void clusterizerrunahc(const clusterizerstate &s, ahcreport &rep);
5922 void smp_clusterizerrunahc(const clusterizerstate &s, ahcreport &rep);
5923 
5924 
5925 /*************************************************************************
5926 This function performs clustering by the k-means++ algorithm.
5927 
5928 You may change algorithm properties by calling:
5929 * ClusterizerSetKMeansLimits() to change number of restarts or iterations
5930 * ClusterizerSetKMeansInit() to change initialization algorithm
5931 
5932 By default, one restart and an unlimited number of iterations are used,
5933 and the initialization algorithm is chosen automatically.
5934 
5935 COMMERCIAL EDITION OF ALGLIB:
5936 
5937  ! Commercial version of ALGLIB includes two important improvements of
5938  ! this function:
5939  ! * multicore support (can be used from C# and C++)
5940  ! * access to high-performance C++ core (actual for C# users)
5941  !
5942  ! K-means clustering algorithm has two phases: selection of initial
5943  ! centers and clustering itself. ALGLIB parallelizes both phases.
5944  ! The parallel version is optimized for the following scenario: medium
5945  ! or high-dimensional problems (20 or more dimensions) with a large
5946  ! number of points and clusters. However, some speed-up can be obtained
5947  ! even when the assumptions above are violated.
5948  !
5949  ! As for the native-vs-managed comparison, working with the native core
5950  ! brings a 30-40% speed improvement over the pure C# version of ALGLIB.
5951  !
5952  ! We recommend you to read 'Working with commercial version' section of
5953  ! ALGLIB Reference Manual in order to find out how to use performance-
5954  ! related features provided by commercial edition of ALGLIB.
5955 
5956 INPUT PARAMETERS:
5957  S - clusterizer state, initialized by ClusterizerCreate()
5958  K - number of clusters, K>=0.
5959  K can be zero only when the algorithm is called on an empty
5960  dataset; in this case the completion code is set to
5961  success (+1).
5962  If K=0 and the dataset size is non-zero, we cannot
5963  meaningfully assign points to any center (there are no
5964  centers because K=0) and return -3 as the completion code
5965  (failure).
5966 
5967 OUTPUT PARAMETERS:
5968  Rep - clustering results; see description of KMeansReport
5969  structure for more information.
5970 
5971 NOTE 1: k-means clustering can be performed only for datasets with
5972  Euclidean distance function. The algorithm will return a negative
5973  completion code in Rep.TerminationType if the dataset was added to
5974  the clusterizer with a DistType other than Euclidean (or was
5975  specified by a distance matrix instead of explicitly given points).
5976 
5977  -- ALGLIB --
5978  Copyright 10.07.2012 by Bochkanov Sergey
5979 *************************************************************************/
5980 void clusterizerrunkmeans(const clusterizerstate &s, const ae_int_t k, kmeansreport &rep);
5981 void smp_clusterizerrunkmeans(const clusterizerstate &s, const ae_int_t k, kmeansreport &rep);
5982 
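A minimal sketch (an illustrative annotation): k-means with K=2 on a small Euclidean dataset.

    #include "dataanalysis.h"

    int main()
    {
        alglib::clusterizerstate s;
        alglib::kmeansreport rep;
        alglib::real_2d_array xy = "[[1,1],[1,2],[4,1],[2,3],[4,1.5]]";

        alglib::clusterizercreate(s);
        alglib::clusterizersetpoints(s, xy, 5, 2, 2); // Euclidean distance is required
        alglib::clusterizerrunkmeans(s, 2, rep);      // K=2
        if( rep.terminationtype>0 )
        {
            // rep.c stores the two centers, rep.cidx the per-point cluster indexes
        }
        return 0;
    }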
5983 
5984 /*************************************************************************
5985 This function returns the distance matrix for a dataset
5986 
5987 COMMERCIAL EDITION OF ALGLIB:
5988 
5989  ! Commercial version of ALGLIB includes two important improvements of
5990  ! this function, which can be used from C++ and C#:
5991  ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
5992  ! * multicore support
5993  !
5994  ! Agglomerative hierarchical clustering algorithm has two phases:
5995  ! distance matrix calculation and clustering itself. Only first phase
5996  ! (distance matrix calculation) is accelerated by Intel MKL and multi-
5997  ! threading. Thus, acceleration is significant only for medium or high-
5998  ! dimensional problems.
5999  !
6000  ! We recommend you to read 'Working with commercial version' section of
6001  ! ALGLIB Reference Manual in order to find out how to use performance-
6002  ! related features provided by commercial edition of ALGLIB.
6003 
6004 INPUT PARAMETERS:
6005  XY - array[NPoints,NFeatures], dataset
6006  NPoints - number of points, >=0
6007  NFeatures- number of features, >=1
6008  DistType- distance function:
6009  * 0 Chebyshev distance (L-inf norm)
6010  * 1 city block distance (L1 norm)
6011  * 2 Euclidean distance (L2 norm, non-squared)
6012  * 10 Pearson correlation:
6013  dist(a,b) = 1-corr(a,b)
6014  * 11 Absolute Pearson correlation:
6015  dist(a,b) = 1-|corr(a,b)|
6016  * 12 Uncentered Pearson correlation (cosine of the angle):
6017  dist(a,b) = a'*b/(|a|*|b|)
6018  * 13 Absolute uncentered Pearson correlation
6019  dist(a,b) = |a'*b|/(|a|*|b|)
6020  * 20 Spearman rank correlation:
6021  dist(a,b) = 1-rankcorr(a,b)
6022  * 21 Absolute Spearman rank correlation
6023  dist(a,b) = 1-|rankcorr(a,b)|
6024 
6025 OUTPUT PARAMETERS:
6026  D - array[NPoints,NPoints], distance matrix
6027  (full matrix is returned, with lower and upper triangles)
6028 
6029 NOTE: different distance functions have different performance penalty:
6030  * Euclidean or Pearson correlation distances are the fastest ones
6031  * Spearman correlation distance function is a bit slower
6032  * city block and Chebyshev distances are an order of magnitude slower
6033 
6034  The reason behind the difference in performance is that correlation-
6035  based distance functions are computed using optimized linear algebra
6036  kernels, while Chebyshev and city block distance functions are computed
6037  using simple nested loops with two branches at each iteration.
6038 
6039  -- ALGLIB --
6040  Copyright 10.07.2012 by Bochkanov Sergey
6041 *************************************************************************/
6042 void clusterizergetdistances(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nfeatures, const ae_int_t disttype, real_2d_array &d);
6043 void smp_clusterizergetdistances(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nfeatures, const ae_int_t disttype, real_2d_array &d);
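A minimal sketch (an illustrative annotation): computing a full Chebyshev distance matrix for three 2-dimensional points.

    #include "dataanalysis.h"

    int main()
    {
        alglib::real_2d_array xy = "[[1,1],[1,2],[4,1]]";
        alglib::real_2d_array d;
        alglib::clusterizergetdistances(xy, 3, 2, 0, d); // DistType=0: Chebyshev
        // d is now a full symmetric 3x3 matrix with a zero diagonal
        return 0;
    }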
6044 
6045 
6046 /*************************************************************************
6047 This function takes a clusterization report Rep and a desired cluster
6048 count K, and builds the top K clusters from the hierarchical clusterization
6049 tree. It returns the assignment of points to clusters (cluster indexes).
6050 
6051 INPUT PARAMETERS:
6052  Rep - report from ClusterizerRunAHC() performed on XY
6053  K - desired number of clusters, 1<=K<=NPoints.
6054  K can be zero only when NPoints=0.
6055 
6056 OUTPUT PARAMETERS:
6057  CIdx - array[NPoints], I-th element contains cluster index (from
6058  0 to K-1) for I-th point of the dataset.
6059  CZ - array[K]. This array allows you to convert cluster indexes
6060  returned by this function to indexes used by Rep.Z. J-th
6061  cluster returned by this function corresponds to CZ[J]-th
6062  cluster stored in Rep.Z/PZ/PM.
6063  It is guaranteed that CZ[I]<CZ[I+1].
6064 
6065 NOTE: K clusters built by this subroutine are assumed to have no hierarchy.
6066  Although they were obtained by manipulating the top K nodes of the
6067  dendrogram (i.e. the hierarchical decomposition of the dataset),
6068  this function does not return information about the hierarchy.
6069  Each of the clusters stands on its own.
6070 
6071 NOTE: Cluster indexes returned by this function do not correspond to the
6072  indexes returned in Rep.Z/PZ/PM. Either you work with the hierarchical
6073  representation of the dataset (dendrogram), or you work with the "flat"
6074  representation returned by this function. Each representation has its
6075  own cluster indexing system (the former uses [0..2*NPoints-2], while
6076  the latter uses [0..K-1]), although it is possible to convert from one
6077  system to the other by means of the CZ array returned by this
6078  function, which allows you to convert indexes stored in CIdx to the
6079  numeration system used by Rep.Z.
6080 
6081 NOTE: this subroutine is optimized for moderate values of K. Say, for K=5
6082  it will perform many times faster than for K=100. Its worst-case
6083  performance is O(N*K), although in the average case it performs
6084  better (up to O(N*log(K))).
6085 
6086  -- ALGLIB --
6087  Copyright 10.07.2012 by Bochkanov Sergey
6088 *************************************************************************/
6089 void clusterizergetkclusters(const ahcreport &rep, const ae_int_t k, integer_1d_array &cidx, integer_1d_array &cz);
6090 
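A minimal sketch (an illustrative annotation; `rep` is assumed to be an AHCReport produced by a prior ClusterizerRunAHC() call, as in the sketches above):

    // cut the dendrogram stored in rep into K=2 flat clusters
    alglib::integer_1d_array cidx, cz;
    alglib::clusterizergetkclusters(rep, 2, cidx, cz);
    // cidx[i] (0 or 1) is the cluster of point i; cz maps these
    // cluster indexes back to the numeration used by rep.z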
6091 
6092 /*************************************************************************
6093 This function accepts an AHC report Rep and a desired minimum intercluster
6094 distance, and returns the top clusters from the hierarchical clusterization
6095 tree which are separated by distance R or HIGHER.
6096 
6097 It returns the assignment of points to clusters (an array of cluster indexes).
6098 
6099 There is one more function with a similar name, ClusterizerSeparatedByCorr,
6100 which returns clusters with intercluster correlation equal to R or LOWER
6101 (note: higher for distance, lower for correlation).
6102 
6103 INPUT PARAMETERS:
6104  Rep - report from ClusterizerRunAHC() performed on XY
6105  R - desired minimum intercluster distance, R>=0
6106 
6107 OUTPUT PARAMETERS:
6108  K - number of clusters, 1<=K<=NPoints
6109  CIdx - array[NPoints], I-th element contains cluster index (from
6110  0 to K-1) for I-th point of the dataset.
6111  CZ - array[K]. This array allows you to convert cluster indexes
6112  returned by this function to indexes used by Rep.Z. J-th
6113  cluster returned by this function corresponds to CZ[J]-th
6114  cluster stored in Rep.Z/PZ/PM.
6115  It is guaranteed that CZ[I]<CZ[I+1].
6116 
6117 NOTE: K clusters built by this subroutine are assumed to have no hierarchy.
6118  Although they were obtained by manipulating the top K nodes of the
6119  dendrogram (i.e. the hierarchical decomposition of the dataset),
6120  this function does not return information about the hierarchy.
6121  Each of the clusters stands on its own.
6122 
6123 NOTE: Cluster indexes returned by this function do not correspond to the
6124  indexes returned in Rep.Z/PZ/PM. Either you work with the hierarchical
6125  representation of the dataset (dendrogram), or you work with the "flat"
6126  representation returned by this function. Each representation has its
6127  own cluster indexing system (the former uses [0..2*NPoints-2], while
6128  the latter uses [0..K-1]), although it is possible to convert from one
6129  system to the other by means of the CZ array returned by this
6130  function, which allows you to convert indexes stored in CIdx to the
6131  numeration system used by Rep.Z.
6132 
6133 NOTE: this subroutine is optimized for moderate values of K. Say, for K=5
6134  it will perform many times faster than for K=100. Its worst-case
6135  performance is O(N*K), although in the average case it performs
6136  better (up to O(N*log(K))).
6137 
6138  -- ALGLIB --
6139  Copyright 10.07.2012 by Bochkanov Sergey
6140 *************************************************************************/
6141 void clusterizerseparatedbydist(const ahcreport &rep, const double r, ae_int_t &k, integer_1d_array &cidx, integer_1d_array &cz);
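A minimal sketch (an illustrative annotation; again `rep` is assumed to be an AHCReport from ClusterizerRunAHC()):

    // extract all clusters separated by distance >= 0.5
    alglib::ae_int_t k;
    alglib::integer_1d_array cidx, cz;
    alglib::clusterizerseparatedbydist(rep, 0.5, k, cidx, cz);
    // k is the resulting cluster count, cidx the per-point cluster indexes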
6142 
6143 
6144 /*************************************************************************
6145 This function accepts an AHC report Rep and a desired maximum intercluster
6146 correlation, and returns the top clusters from the hierarchical
6147 clusterization tree which are separated by correlation R or LOWER.
6148 
6149 It returns the assignment of points to clusters (an array of cluster indexes).
6150 
6151 There is one more function with a similar name, ClusterizerSeparatedByDist,
6152 which returns clusters with intercluster distance equal to R or HIGHER
6153 (note: higher for distance, lower for correlation).
6154 
6155 INPUT PARAMETERS:
6156  Rep - report from ClusterizerRunAHC() performed on XY
6157  R - desired maximum intercluster correlation, -1<=R<=+1
6158 
6159 OUTPUT PARAMETERS:
6160  K - number of clusters, 1<=K<=NPoints
6161  CIdx - array[NPoints], I-th element contains cluster index (from
6162  0 to K-1) for I-th point of the dataset.
6163  CZ - array[K]. This array allows you to convert cluster indexes
6164  returned by this function to indexes used by Rep.Z. J-th
6165  cluster returned by this function corresponds to CZ[J]-th
6166  cluster stored in Rep.Z/PZ/PM.
6167  It is guaranteed that CZ[I]<CZ[I+1].
6168 
6169 NOTE: K clusters built by this subroutine are assumed to have no hierarchy.
6170  Although they were obtained by manipulating the top K nodes of the
6171  dendrogram (i.e. the hierarchical decomposition of the dataset),
6172  this function does not return information about the hierarchy.
6173  Each of the clusters stands on its own.
6174 
6175 NOTE: Cluster indexes returned by this function do not correspond to the
6176  indexes returned in Rep.Z/PZ/PM. Either you work with the hierarchical
6177  representation of the dataset (dendrogram), or you work with the "flat"
6178  representation returned by this function. Each representation has its
6179  own cluster indexing system (the former uses [0..2*NPoints-2], while
6180  the latter uses [0..K-1]), although it is possible to convert from one
6181  system to the other by means of the CZ array returned by this
6182  function, which allows you to convert indexes stored in CIdx to the
6183  numeration system used by Rep.Z.
6184 
6185 NOTE: this subroutine is optimized for moderate values of K. Say, for K=5
6186  it will perform many times faster than for K=100. Its worst-case
6187  performance is O(N*K), although in the average case it performs
6188  better (up to O(N*log(K))).
6189 
6190  -- ALGLIB --
6191  Copyright 10.07.2012 by Bochkanov Sergey
6192 *************************************************************************/
6193 void clusterizerseparatedbycorr(const ahcreport &rep, const double r, ae_int_t &k, integer_1d_array &cidx, integer_1d_array &cz);
6194 
6195 /*************************************************************************
6196 This function serializes the data structure to a string.
6197 
6198 Important properties of s_out:
6199 * it contains alphanumeric characters, dots, underscores, minus signs
6200 * these symbols are grouped into words, which are separated by spaces
6201  and Windows-style (CR+LF) newlines
6202 * although the serializer uses spaces and CR+LF as separators, you can
6203  replace any separator character by an arbitrary combination of spaces,
6204  tabs, and Windows or Unix newlines. This allows flexible reformatting
6205  of the string in case you want to include it in a text or XML file.
6206  But you should not insert separators into the middle of the "words",
6207  nor should you change the case of letters.
6208 * s_out can be freely moved between 32-bit and 64-bit systems, little
6209  and big endian machines, and so on. You can serialize a structure on
6210  a 32-bit machine and unserialize it on a 64-bit one (or vice versa),
6211  or serialize it on SPARC and unserialize it on x86. You can also
6212  serialize it in the C++ version of ALGLIB and unserialize it in the
6213  C# one, and vice versa.
6214 *************************************************************************/
6215 void dfserialize(decisionforest &obj, std::string &s_out);
6216 
6217 
6218 /*************************************************************************
6219 This function unserializes the data structure from a string.
6220 *************************************************************************/
6221 void dfunserialize(const std::string &s_in, decisionforest &obj);
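A minimal round-trip sketch (an illustrative annotation; `df` is assumed to be a trained decisionforest, e.g. one built with DFBuildRandomDecisionForest() below):

    #include <string>

    std::string s;
    alglib::dfserialize(df, s);    // forest -> portable string
    alglib::decisionforest df2;
    alglib::dfunserialize(s, df2); // string -> equivalent forest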
6222 
6223 
6224 
6225 
6226 /*************************************************************************
6227 This function serializes the data structure to a C++ stream.
6228 
6229 The data stream generated by this function is the same as the string
6230 representation generated by the string version of the serializer:
6231 alphanumeric characters, dots, underscores and minus signs, grouped
6232 into words separated by spaces and CR+LF.
6233 
6234 We recommend that you read the comments on the string version of the
6235 serializer to find out more about serialization of ALGLIB objects.
6236 *************************************************************************/
6237 void dfserialize(decisionforest &obj, std::ostream &s_out);
6238 
6239 
6240 /*************************************************************************
6241 This function unserializes the data structure from a stream.
6242 *************************************************************************/
6243 void dfunserialize(const std::istream &s_in, decisionforest &obj);
6244 
6245 
6246 /*************************************************************************
6247 This subroutine builds a random decision forest.
6248 
6249 INPUT PARAMETERS:
6250  XY - training set
6251  NPoints - training set size, NPoints>=1
6252  NVars - number of independent variables, NVars>=1
6253  NClasses - task type:
6254  * NClasses=1 - regression task with one
6255  dependent variable
6256  * NClasses>1 - classification task with
6257  NClasses classes.
6258  NTrees - number of trees in a forest, NTrees>=1.
6259  recommended values: 50-100.
6260  R - fraction of the training set used to build
6261  individual trees, 0<R<=1.
6262  recommended values: 0.1 <= R <= 0.66.
6263 
6264 OUTPUT PARAMETERS:
6265  Info - return code:
6266  * -2, if there is a point with class number
6267  outside of [0..NClasses-1].
6268  * -1, if incorrect parameters were passed
6269  (NPoints<1, NVars<1, NClasses<1, NTrees<1, R<=0
6270  or R>1).
6271  * 1, if the task has been solved
6272  DF - model built
6273  Rep - training report; contains error on the training set
6274  and out-of-bag estimates of the generalization error.
6275 
6276  -- ALGLIB --
6277  Copyright 19.02.2009 by Bochkanov Sergey
6278 *************************************************************************/
6279 void dfbuildrandomdecisionforest(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nclasses, const ae_int_t ntrees, const double r, ae_int_t &info, decisionforest &df, dfreport &rep);
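A minimal sketch (an illustrative annotation): a 2-class forest on a toy XOR-like dataset, where the last column holds the class number.

    #include "dataanalysis.h"

    int main()
    {
        alglib::real_2d_array xy = "[[0,0,0],[0,1,1],[1,0,1],[1,1,0]]";
        alglib::ae_int_t info;
        alglib::decisionforest df;
        alglib::dfreport rep;

        // 4 points, 2 variables, 2 classes, 50 trees, R=0.5
        alglib::dfbuildrandomdecisionforest(xy, 4, 2, 2, 50, 0.5, info, df, rep);

        alglib::real_1d_array x = "[0,1]", y;
        alglib::dfprocess(df, x, y); // y = posterior probabilities of classes 0 and 1
        return 0;
    }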
6280 
6281 
6282 /*************************************************************************
6283 This subroutine builds a random decision forest.
6284 It gives you the ability to tune the number of variables used when
6285 choosing the best split.
6286 
6287 INPUT PARAMETERS:
6288  XY - training set
6289  NPoints - training set size, NPoints>=1
6290  NVars - number of independent variables, NVars>=1
6291  NClasses - task type:
6292  * NClasses=1 - regression task with one
6293  dependent variable
6294  * NClasses>1 - classification task with
6295  NClasses classes.
6296  NTrees - number of trees in a forest, NTrees>=1.
6297  recommended values: 50-100.
6298  NRndVars - number of variables used when choosing the best split
6299  R - fraction of the training set used to build
6300  individual trees, 0<R<=1.
6301  recommended values: 0.1 <= R <= 0.66.
6302 
6303 OUTPUT PARAMETERS:
6304  Info - return code:
6305  * -2, if there is a point with class number
6306  outside of [0..NClasses-1].
6307  * -1, if incorrect parameters were passed
6308  (NPoints<1, NVars<1, NClasses<1, NTrees<1, R<=0
6309  or R>1).
6310  * 1, if the task has been solved
6311  DF - model built
6312  Rep - training report; contains error on the training set
6313  and out-of-bag estimates of the generalization error.
6314 
6315  -- ALGLIB --
6316  Copyright 19.02.2009 by Bochkanov Sergey
6317 *************************************************************************/
6318 void dfbuildrandomdecisionforestx1(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nclasses, const ae_int_t ntrees, const ae_int_t nrndvars, const double r, ae_int_t &info, decisionforest &df, dfreport &rep);
6319 
6320 
6321 /*************************************************************************
6322 Processing
6323 
6324 INPUT PARAMETERS:
6325  DF - decision forest model
6326  X - input vector, array[0..NVars-1].
6327 
6328 OUTPUT PARAMETERS:
6329  Y - result: regression estimate when solving a regression task,
6330  or a vector of posterior probabilities for a classification task.
6331 
6332 See also DFProcessI.
6333 
6334  -- ALGLIB --
6335  Copyright 16.02.2009 by Bochkanov Sergey
6336 *************************************************************************/
6337 void dfprocess(const decisionforest &df, const real_1d_array &x, real_1d_array &y);
6338 
6339 
6340 /*************************************************************************
6341 'interactive' variant of DFProcess for languages like Python which support
6342 constructs like "Y = DFProcessI(DF,X)" and an interactive interpreter mode.
6343 
6344 This function allocates a new array on each call, so it is significantly
6345 slower than its 'non-interactive' counterpart, but it is more convenient
6346 when you call it from the command line.
6347 
6348  -- ALGLIB --
6349  Copyright 28.02.2010 by Bochkanov Sergey
6350 *************************************************************************/
6351 void dfprocessi(const decisionforest &df, const real_1d_array &x, real_1d_array &y);
6352 
6353 
6354 /*************************************************************************
6355 Relative classification error on the test set
6356 
6357 INPUT PARAMETERS:
6358  DF - decision forest model
6359  XY - test set
6360  NPoints - test set size
6361 
6362 RESULT:
6363  fraction of incorrectly classified cases.
6364  Zero if the model solves a regression task.
6365 
6366  -- ALGLIB --
6367  Copyright 16.02.2009 by Bochkanov Sergey
6368 *************************************************************************/
6369 double dfrelclserror(const decisionforest &df, const real_2d_array &xy, const ae_int_t npoints);
6370 
6371 
6372 /*************************************************************************
6373 Average cross-entropy (in bits per element) on the test set
6374 
6375 INPUT PARAMETERS:
6376  DF - decision forest model
6377  XY - test set
6378  NPoints - test set size
6379 
6380 RESULT:
6381  CrossEntropy/(NPoints*LN(2)).
6382  Zero if the model solves a regression task.
6383 
6384  -- ALGLIB --
6385  Copyright 16.02.2009 by Bochkanov Sergey
6386 *************************************************************************/
6387 double dfavgce(const decisionforest &df, const real_2d_array &xy, const ae_int_t npoints);
6388 
6389 
6390 /*************************************************************************
6391 RMS error on the test set
6392 
6393 INPUT PARAMETERS:
6394  DF - decision forest model
6395  XY - test set
6396  NPoints - test set size
6397 
6398 RESULT:
6399  root mean square error.
6400  Its meaning for a regression task is obvious. For a
6401  classification task, the RMS error is the error made when
6402  estimating posterior probabilities.
6403 
6404  -- ALGLIB --
6405  Copyright 16.02.2009 by Bochkanov Sergey
6406 *************************************************************************/
6407 double dfrmserror(const decisionforest &df, const real_2d_array &xy, const ae_int_t npoints);
6408 
6409 
6410 /*************************************************************************
6411 Average error on the test set
6412 
6413 INPUT PARAMETERS:
6414  DF - decision forest model
6415  XY - test set
6416  NPoints - test set size
6417 
6418 RESULT:
6419  Its meaning for a regression task is obvious. For a
6420  classification task, it is the average error made when estimating
6421  posterior probabilities.
6422 
6423  -- ALGLIB --
6424  Copyright 16.02.2009 by Bochkanov Sergey
6425 *************************************************************************/
6426 double dfavgerror(const decisionforest &df, const real_2d_array &xy, const ae_int_t npoints);
6427 
6428 
6429 /*************************************************************************
6430 Average relative error on the test set
6431 
6432 INPUT PARAMETERS:
6433  DF - decision forest model
6434  XY - test set
6435  NPoints - test set size
6436 
6437 RESULT:
6438  Its meaning for a regression task is obvious. For a
6439  classification task, it is the average relative error made when
6440  estimating the posterior probability of the correct class.
6441 
6442  -- ALGLIB --
6443  Copyright 16.02.2009 by Bochkanov Sergey
6444 *************************************************************************/
6445 double dfavgrelerror(const decisionforest &df, const real_2d_array &xy, const ae_int_t npoints);
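A minimal evaluation sketch (an illustrative annotation; `df` is assumed to be a forest trained as in the sketch above):

    // evaluate the forest on a 2-point test set (last column = class)
    alglib::real_2d_array testxy = "[[0,0,0],[1,1,0]]";
    double relcls = alglib::dfrelclserror(df, testxy, 2); // misclassification rate
    double rms    = alglib::dfrmserror(df, testxy, 2);    // RMS error of posteriors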
6446 
6447 /*************************************************************************
6448 k-means++ clusterization.
6449 This is a backward-compatibility function; we recommend using the
6450 CLUSTERING subpackage as a better replacement.
6451 
6452  -- ALGLIB --
6453  Copyright 21.03.2009 by Bochkanov Sergey
6454 *************************************************************************/
6455 void kmeansgenerate(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t k, const ae_int_t restarts, ae_int_t &info, real_2d_array &c, integer_1d_array &xyc);
6456 }
6457 
6459 //
6460 // THIS SECTION CONTAINS COMPUTATIONAL CORE DECLARATIONS (FUNCTIONS)
6461 //
6463 namespace alglib_impl
6464 {
6465 void pcabuildbasis(/* Real */ ae_matrix* x,
6466  ae_int_t npoints,
6467  ae_int_t nvars,
6468  ae_int_t* info,
6469  /* Real */ ae_vector* s2,
6470  /* Real */ ae_matrix* v,
6471  ae_state *_state);
6472 void _pexec_pcabuildbasis(/* Real */ ae_matrix* x,
6473  ae_int_t npoints,
6474  ae_int_t nvars,
6475  ae_int_t* info,
6476  /* Real */ ae_vector* s2,
6477  /* Real */ ae_matrix* v, ae_state *_state);
6478 void pcatruncatedsubspace(/* Real */ ae_matrix* x,
6479  ae_int_t npoints,
6480  ae_int_t nvars,
6481  ae_int_t nneeded,
6482  double eps,
6483  ae_int_t maxits,
6484  /* Real */ ae_vector* s2,
6485  /* Real */ ae_matrix* v,
6486  ae_state *_state);
6488  ae_int_t npoints,
6489  ae_int_t nvars,
6490  ae_int_t nneeded,
6491  double eps,
6492  ae_int_t maxits,
6493  /* Real */ ae_vector* s2,
6494  /* Real */ ae_matrix* v, ae_state *_state);
6495 void dserrallocate(ae_int_t nclasses,
6496  /* Real */ ae_vector* buf,
6497  ae_state *_state);
6498 void dserraccumulate(/* Real */ ae_vector* buf,
6499  /* Real */ ae_vector* y,
6500  /* Real */ ae_vector* desiredy,
6501  ae_state *_state);
6502 void dserrfinish(/* Real */ ae_vector* buf, ae_state *_state);
6503 void dsnormalize(/* Real */ ae_matrix* xy,
6504  ae_int_t npoints,
6505  ae_int_t nvars,
6506  ae_int_t* info,
6507  /* Real */ ae_vector* means,
6508  /* Real */ ae_vector* sigmas,
6509  ae_state *_state);
6510 void dsnormalizec(/* Real */ ae_matrix* xy,
6511  ae_int_t npoints,
6512  ae_int_t nvars,
6513  ae_int_t* info,
6514  /* Real */ ae_vector* means,
6515  /* Real */ ae_vector* sigmas,
6516  ae_state *_state);
6517 double dsgetmeanmindistance(/* Real */ ae_matrix* xy,
6518  ae_int_t npoints,
6519  ae_int_t nvars,
6520  ae_state *_state);
6521 void dstie(/* Real */ ae_vector* a,
6522  ae_int_t n,
6523  /* Integer */ ae_vector* ties,
6524  ae_int_t* tiecount,
6525  /* Integer */ ae_vector* p1,
6526  /* Integer */ ae_vector* p2,
6527  ae_state *_state);
6528 void dstiefasti(/* Real */ ae_vector* a,
6529  /* Integer */ ae_vector* b,
6530  ae_int_t n,
6531  /* Integer */ ae_vector* ties,
6532  ae_int_t* tiecount,
6533  /* Real */ ae_vector* bufr,
6534  /* Integer */ ae_vector* bufi,
6535  ae_state *_state);
6536 void dsoptimalsplit2(/* Real */ ae_vector* a,
6537  /* Integer */ ae_vector* c,
6538  ae_int_t n,
6539  ae_int_t* info,
6540  double* threshold,
6541  double* pal,
6542  double* pbl,
6543  double* par,
6544  double* pbr,
6545  double* cve,
6546  ae_state *_state);
6547 void dsoptimalsplit2fast(/* Real */ ae_vector* a,
6548  /* Integer */ ae_vector* c,
6549  /* Integer */ ae_vector* tiesbuf,
6550  /* Integer */ ae_vector* cntbuf,
6551  /* Real */ ae_vector* bufr,
6552  /* Integer */ ae_vector* bufi,
6553  ae_int_t n,
6554  ae_int_t nc,
6555  double alpha,
6556  ae_int_t* info,
6557  double* threshold,
6558  double* rms,
6559  double* cvrms,
6560  ae_state *_state);
6561 void dssplitk(/* Real */ ae_vector* a,
6562  /* Integer */ ae_vector* c,
6563  ae_int_t n,
6564  ae_int_t nc,
6565  ae_int_t kmax,
6566  ae_int_t* info,
6567  /* Real */ ae_vector* thresholds,
6568  ae_int_t* ni,
6569  double* cve,
6570  ae_state *_state);
6571 void dsoptimalsplitk(/* Real */ ae_vector* a,
6572  /* Integer */ ae_vector* c,
6573  ae_int_t n,
6574  ae_int_t nc,
6575  ae_int_t kmax,
6576  ae_int_t* info,
6577  /* Real */ ae_vector* thresholds,
6578  ae_int_t* ni,
6579  double* cve,
6580  ae_state *_state);
6581 void _cvreport_init(void* _p, ae_state *_state);
6582 void _cvreport_init_copy(void* _dst, void* _src, ae_state *_state);
6583 void _cvreport_clear(void* _p);
6584 void _cvreport_destroy(void* _p);
6588  ae_int_t nout,
6589  multilayerperceptron* network,
6590  ae_state *_state);
6592  ae_int_t nhid,
6593  ae_int_t nout,
6594  multilayerperceptron* network,
6595  ae_state *_state);
6597  ae_int_t nhid1,
6598  ae_int_t nhid2,
6599  ae_int_t nout,
6600  multilayerperceptron* network,
6601  ae_state *_state);
6603  ae_int_t nout,
6604  double b,
6605  double d,
6606  multilayerperceptron* network,
6607  ae_state *_state);
6609  ae_int_t nhid,
6610  ae_int_t nout,
6611  double b,
6612  double d,
6613  multilayerperceptron* network,
6614  ae_state *_state);
6616  ae_int_t nhid1,
6617  ae_int_t nhid2,
6618  ae_int_t nout,
6619  double b,
6620  double d,
6621  multilayerperceptron* network,
6622  ae_state *_state);
6624  ae_int_t nout,
6625  double a,
6626  double b,
6627  multilayerperceptron* network,
6628  ae_state *_state);
6630  ae_int_t nhid,
6631  ae_int_t nout,
6632  double a,
6633  double b,
6634  multilayerperceptron* network,
6635  ae_state *_state);
6637  ae_int_t nhid1,
6638  ae_int_t nhid2,
6639  ae_int_t nout,
6640  double a,
6641  double b,
6642  multilayerperceptron* network,
6643  ae_state *_state);
6645  ae_int_t nout,
6646  multilayerperceptron* network,
6647  ae_state *_state);
6649  ae_int_t nhid,
6650  ae_int_t nout,
6651  multilayerperceptron* network,
6652  ae_state *_state);
6654  ae_int_t nhid1,
6655  ae_int_t nhid2,
6656  ae_int_t nout,
6657  multilayerperceptron* network,
6658  ae_state *_state);
6660  multilayerperceptron* network2,
6661  ae_state *_state);
6663  multilayerperceptron* network2,
6664  ae_state *_state);
6666  multilayerperceptron* network2,
6667  ae_state *_state);
6669  multilayerperceptron* network2,
6670  ae_state *_state);
6672  /* Real */ ae_vector* p,
6673  ae_int_t* pcount,
6674  ae_state *_state);
6676  /* Real */ ae_vector* p,
6677  ae_state *_state);
6679  /* Real */ ae_vector* ra,
6680  ae_int_t* rlen,
6681  ae_state *_state);
6682 void mlpunserializeold(/* Real */ ae_vector* ra,
6683  multilayerperceptron* network,
6684  ae_state *_state);
6688  /* Real */ ae_matrix* xy,
6689  ae_int_t ssize,
6690  ae_state *_state);
6692  sparsematrix* xy,
6693  ae_int_t ssize,
6694  ae_state *_state);
6696  /* Real */ ae_matrix* xy,
6697  ae_int_t setsize,
6698  /* Integer */ ae_vector* idx,
6699  ae_int_t subsetsize,
6700  ae_state *_state);
6702  sparsematrix* xy,
6703  ae_int_t setsize,
6704  /* Integer */ ae_vector* idx,
6705  ae_int_t subsetsize,
6706  ae_state *_state);
6708  ae_int_t* nin,
6709  ae_int_t* nout,
6710  ae_int_t* wcount,
6711  ae_state *_state);
6714  ae_state *_state);
6716  ae_state *_state);
6718  ae_state *_state);
6721  ae_state *_state);
6723  ae_int_t k,
6724  ae_state *_state);
6726  ae_int_t i,
6727  double* mean,
6728  double* sigma,
6729  ae_state *_state);
6731  ae_int_t i,
6732  double* mean,
6733  double* sigma,
6734  ae_state *_state);
6736  ae_int_t k,
6737  ae_int_t i,
6738  ae_int_t* fkind,
6739  double* threshold,
6740  ae_state *_state);
6742  ae_int_t k0,
6743  ae_int_t i0,
6744  ae_int_t k1,
6745  ae_int_t i1,
6746  ae_state *_state);
6748  ae_int_t i,
6749  double mean,
6750  double sigma,
6751  ae_state *_state);
6753  ae_int_t i,
6754  double mean,
6755  double sigma,
6756  ae_state *_state);
6758  ae_int_t k,
6759  ae_int_t i,
6760  ae_int_t fkind,
6761  double threshold,
6762  ae_state *_state);
6764  ae_int_t k0,
6765  ae_int_t i0,
6766  ae_int_t k1,
6767  ae_int_t i1,
6768  double w,
6769  ae_state *_state);
6770 void mlpactivationfunction(double net,
6771  ae_int_t k,
6772  double* f,
6773  double* df,
6774  double* d2f,
6775  ae_state *_state);
6777  /* Real */ ae_vector* x,
6778  /* Real */ ae_vector* y,
6779  ae_state *_state);
6781  /* Real */ ae_vector* x,
6782  /* Real */ ae_vector* y,
6783  ae_state *_state);
6785  /* Real */ ae_matrix* xy,
6786  ae_int_t npoints,
6787  ae_state *_state);
6789  /* Real */ ae_matrix* xy,
6790  ae_int_t npoints, ae_state *_state);
6792  sparsematrix* xy,
6793  ae_int_t npoints,
6794  ae_state *_state);
6796  sparsematrix* xy,
6797  ae_int_t npoints, ae_state *_state);
6799  /* Real */ ae_matrix* xy,
6800  ae_int_t ssize,
6801  ae_state *_state);
6803  /* Real */ ae_matrix* xy,
6804  ae_int_t npoints,
6805  ae_state *_state);
6807  /* Real */ ae_matrix* xy,
6808  ae_int_t npoints, ae_state *_state);
6810  /* Real */ ae_matrix* xy,
6811  ae_int_t npoints,
6812  ae_state *_state);
6814  /* Real */ ae_matrix* xy,
6815  ae_int_t npoints, ae_state *_state);
6817  sparsematrix* xy,
6818  ae_int_t npoints,
6819  ae_state *_state);
6821  sparsematrix* xy,
6822  ae_int_t npoints, ae_state *_state);
6824  /* Real */ ae_matrix* xy,
6825  ae_int_t npoints,
6826  ae_state *_state);
6828  /* Real */ ae_matrix* xy,
6829  ae_int_t npoints, ae_state *_state);
6831  sparsematrix* xy,
6832  ae_int_t npoints,
6833  ae_state *_state);
6835  sparsematrix* xy,
6836  ae_int_t npoints, ae_state *_state);
6838  /* Real */ ae_matrix* xy,
6839  ae_int_t npoints,
6840  ae_state *_state);
6842  /* Real */ ae_matrix* xy,
6843  ae_int_t npoints, ae_state *_state);
6845  sparsematrix* xy,
6846  ae_int_t npoints,
6847  ae_state *_state);
6849  sparsematrix* xy,
6850  ae_int_t npoints, ae_state *_state);
6852  /* Real */ ae_matrix* xy,
6853  ae_int_t npoints,
6854  ae_state *_state);
6856  /* Real */ ae_matrix* xy,
6857  ae_int_t npoints, ae_state *_state);
6859  sparsematrix* xy,
6860  ae_int_t npoints,
6861  ae_state *_state);
6863  sparsematrix* xy,
6864  ae_int_t npoints, ae_state *_state);
6866  /* Real */ ae_matrix* xy,
6867  ae_int_t npoints,
6868  ae_state *_state);
6870  /* Real */ ae_matrix* xy,
6871  ae_int_t npoints, ae_state *_state);
6873  sparsematrix* xy,
6874  ae_int_t npoints,
6875  ae_state *_state);
6877  sparsematrix* xy,
6878  ae_int_t npoints, ae_state *_state);
6880  /* Real */ ae_vector* x,
6881  /* Real */ ae_vector* desiredy,
6882  double* e,
6883  /* Real */ ae_vector* grad,
6884  ae_state *_state);
6886  /* Real */ ae_vector* x,
6887  /* Real */ ae_vector* desiredy,
6888  double* e,
6889  /* Real */ ae_vector* grad,
6890  ae_state *_state);
6892  /* Real */ ae_matrix* xy,
6893  ae_int_t ssize,
6894  double* e,
6895  /* Real */ ae_vector* grad,
6896  ae_state *_state);
6898  /* Real */ ae_matrix* xy,
6899  ae_int_t ssize,
6900  double* e,
6901  /* Real */ ae_vector* grad, ae_state *_state);
6903  sparsematrix* xy,
6904  ae_int_t ssize,
6905  double* e,
6906  /* Real */ ae_vector* grad,
6907  ae_state *_state);
6909  sparsematrix* xy,
6910  ae_int_t ssize,
6911  double* e,
6912  /* Real */ ae_vector* grad, ae_state *_state);
6914  /* Real */ ae_matrix* xy,
6915  ae_int_t setsize,
6916  /* Integer */ ae_vector* idx,
6917  ae_int_t subsetsize,
6918  double* e,
6919  /* Real */ ae_vector* grad,
6920  ae_state *_state);
6922  /* Real */ ae_matrix* xy,
6923  ae_int_t setsize,
6924  /* Integer */ ae_vector* idx,
6925  ae_int_t subsetsize,
6926  double* e,
6927  /* Real */ ae_vector* grad, ae_state *_state);
6929  sparsematrix* xy,
6930  ae_int_t setsize,
6931  /* Integer */ ae_vector* idx,
6932  ae_int_t subsetsize,
6933  double* e,
6934  /* Real */ ae_vector* grad,
6935  ae_state *_state);
6937  sparsematrix* xy,
6938  ae_int_t setsize,
6939  /* Integer */ ae_vector* idx,
6940  ae_int_t subsetsize,
6941  double* e,
6942  /* Real */ ae_vector* grad, ae_state *_state);
6944  /* Real */ ae_matrix* densexy,
6945  sparsematrix* sparsexy,
6946  ae_int_t datasetsize,
6947  ae_int_t datasettype,
6948  /* Integer */ ae_vector* idx,
6949  ae_int_t subset0,
6950  ae_int_t subset1,
6951  ae_int_t subsettype,
6952  ae_shared_pool* buf,
6953  ae_shared_pool* gradbuf,
6954  ae_state *_state);
6956  /* Real */ ae_matrix* xy,
6957  ae_int_t ssize,
6958  double* e,
6959  /* Real */ ae_vector* grad,
6960  ae_state *_state);
6962  /* Real */ ae_matrix* xy,
6963  ae_int_t ssize,
6964  double* e,
6965  /* Real */ ae_vector* grad,
6966  /* Real */ ae_matrix* h,
6967  ae_state *_state);
6969  /* Real */ ae_matrix* xy,
6970  ae_int_t ssize,
6971  double* e,
6972  /* Real */ ae_vector* grad,
6973  /* Real */ ae_matrix* h,
6974  ae_state *_state);
6975 void mlpinternalprocessvector(/* Integer */ ae_vector* structinfo,
6976  /* Real */ ae_vector* weights,
6977  /* Real */ ae_vector* columnmeans,
6978  /* Real */ ae_vector* columnsigmas,
6979  /* Real */ ae_vector* neurons,
6980  /* Real */ ae_vector* dfdnet,
6981  /* Real */ ae_vector* x,
6982  /* Real */ ae_vector* y,
6983  ae_state *_state);
6985  multilayerperceptron* network,
6986  ae_state *_state);
6988  multilayerperceptron* network,
6989  ae_state *_state);
6991  multilayerperceptron* network,
6992  ae_state *_state);
6994  /* Real */ ae_matrix* xy,
6995  ae_int_t setsize,
6996  /* Integer */ ae_vector* subset,
6997  ae_int_t subsetsize,
6998  modelerrors* rep,
6999  ae_state *_state);
7001  /* Real */ ae_matrix* xy,
7002  ae_int_t setsize,
7003  /* Integer */ ae_vector* subset,
7004  ae_int_t subsetsize,
7005  modelerrors* rep, ae_state *_state);
7007  sparsematrix* xy,
7008  ae_int_t setsize,
7009  /* Integer */ ae_vector* subset,
7010  ae_int_t subsetsize,
7011  modelerrors* rep,
7012  ae_state *_state);
7014  sparsematrix* xy,
7015  ae_int_t setsize,
7016  /* Integer */ ae_vector* subset,
7017  ae_int_t subsetsize,
7018  modelerrors* rep, ae_state *_state);
7020  /* Real */ ae_matrix* xy,
7021  ae_int_t setsize,
7022  /* Integer */ ae_vector* subset,
7023  ae_int_t subsetsize,
7024  ae_state *_state);
7026  /* Real */ ae_matrix* xy,
7027  ae_int_t setsize,
7028  /* Integer */ ae_vector* subset,
7029  ae_int_t subsetsize, ae_state *_state);
7031  sparsematrix* xy,
7032  ae_int_t setsize,
7033  /* Integer */ ae_vector* subset,
7034  ae_int_t subsetsize,
7035  ae_state *_state);
7037  sparsematrix* xy,
7038  ae_int_t setsize,
7039  /* Integer */ ae_vector* subset,
7040  ae_int_t subsetsize, ae_state *_state);
7042  /* Real */ ae_matrix* densexy,
7043  sparsematrix* sparsexy,
7044  ae_int_t datasetsize,
7045  ae_int_t datasettype,
7046  /* Integer */ ae_vector* idx,
7047  ae_int_t subset0,
7048  ae_int_t subset1,
7049  ae_int_t subsettype,
7050  ae_shared_pool* buf,
7051  modelerrors* rep,
7052  ae_state *_state);
7053 void _modelerrors_init(void* _p, ae_state *_state);
7054 void _modelerrors_init_copy(void* _dst, void* _src, ae_state *_state);
7055 void _modelerrors_clear(void* _p);
7056 void _modelerrors_destroy(void* _p);
7057 void _smlpgrad_init(void* _p, ae_state *_state);
7058 void _smlpgrad_init_copy(void* _dst, void* _src, ae_state *_state);
7059 void _smlpgrad_clear(void* _p);
7060 void _smlpgrad_destroy(void* _p);
7061 void _multilayerperceptron_init(void* _p, ae_state *_state);
7062 void _multilayerperceptron_init_copy(void* _dst, void* _src, ae_state *_state);
7065 void fisherlda(/* Real */ ae_matrix* xy,
7066  ae_int_t npoints,
7067  ae_int_t nvars,
7068  ae_int_t nclasses,
7069  ae_int_t* info,
7070  /* Real */ ae_vector* w,
7071  ae_state *_state);
7072 void fisherldan(/* Real */ ae_matrix* xy,
7073  ae_int_t npoints,
7074  ae_int_t nvars,
7075  ae_int_t nclasses,
7076  ae_int_t* info,
7077  /* Real */ ae_matrix* w,
7078  ae_state *_state);
7079 void _pexec_fisherldan(/* Real */ ae_matrix* xy,
7080  ae_int_t npoints,
7081  ae_int_t nvars,
7082  ae_int_t nclasses,
7083  ae_int_t* info,
7084  /* Real */ ae_matrix* w, ae_state *_state);
7085 void lrbuild(/* Real */ ae_matrix* xy,
7086  ae_int_t npoints,
7087  ae_int_t nvars,
7088  ae_int_t* info,
7089  linearmodel* lm,
7090  lrreport* ar,
7091  ae_state *_state);
7092 void lrbuilds(/* Real */ ae_matrix* xy,
7093  /* Real */ ae_vector* s,
7094  ae_int_t npoints,
7095  ae_int_t nvars,
7096  ae_int_t* info,
7097  linearmodel* lm,
7098  lrreport* ar,
7099  ae_state *_state);
7100 void lrbuildzs(/* Real */ ae_matrix* xy,
7101  /* Real */ ae_vector* s,
7102  ae_int_t npoints,
7103  ae_int_t nvars,
7104  ae_int_t* info,
7105  linearmodel* lm,
7106  lrreport* ar,
7107  ae_state *_state);
7108 void lrbuildz(/* Real */ ae_matrix* xy,
7109  ae_int_t npoints,
7110  ae_int_t nvars,
7111  ae_int_t* info,
7112  linearmodel* lm,
7113  lrreport* ar,
7114  ae_state *_state);
7116  /* Real */ ae_vector* v,
7117  ae_int_t* nvars,
7118  ae_state *_state);
7119 void lrpack(/* Real */ ae_vector* v,
7120  ae_int_t nvars,
7121  linearmodel* lm,
7122  ae_state *_state);
7124  /* Real */ ae_vector* x,
7125  ae_state *_state);
7127  /* Real */ ae_matrix* xy,
7128  ae_int_t npoints,
7129  ae_state *_state);
7131  /* Real */ ae_matrix* xy,
7132  ae_int_t npoints,
7133  ae_state *_state);
7135  /* Real */ ae_matrix* xy,
7136  ae_int_t npoints,
7137  ae_state *_state);
7138 void lrcopy(linearmodel* lm1, linearmodel* lm2, ae_state *_state);
7139 void lrlines(/* Real */ ae_matrix* xy,
7140  /* Real */ ae_vector* s,
7141  ae_int_t n,
7142  ae_int_t* info,
7143  double* a,
7144  double* b,
7145  double* vara,
7146  double* varb,
7147  double* covab,
7148  double* corrab,
7149  double* p,
7150  ae_state *_state);
7151 void lrline(/* Real */ ae_matrix* xy,
7152  ae_int_t n,
7153  ae_int_t* info,
7154  double* a,
7155  double* b,
7156  ae_state *_state);
7157 void _linearmodel_init(void* _p, ae_state *_state);
7158 void _linearmodel_init_copy(void* _dst, void* _src, ae_state *_state);
7159 void _linearmodel_clear(void* _p);
7160 void _linearmodel_destroy(void* _p);
7161 void _lrreport_init(void* _p, ae_state *_state);
7162 void _lrreport_init_copy(void* _dst, void* _src, ae_state *_state);
7163 void _lrreport_clear(void* _p);
7164 void _lrreport_destroy(void* _p);
7165 void filtersma(/* Real */ ae_vector* x,
7166  ae_int_t n,
7167  ae_int_t k,
7168  ae_state *_state);
7169 void filterema(/* Real */ ae_vector* x,
7170  ae_int_t n,
7171  double alpha,
7172  ae_state *_state);
7173 void filterlrma(/* Real */ ae_vector* x,
7174  ae_int_t n,
7175  ae_int_t k,
7176  ae_state *_state);
7177 void mnltrainh(/* Real */ ae_matrix* xy,
7178  ae_int_t npoints,
7179  ae_int_t nvars,
7180  ae_int_t nclasses,
7181  ae_int_t* info,
7182  logitmodel* lm,
7183  mnlreport* rep,
7184  ae_state *_state);
7186  /* Real */ ae_vector* x,
7187  /* Real */ ae_vector* y,
7188  ae_state *_state);
7190  /* Real */ ae_vector* x,
7191  /* Real */ ae_vector* y,
7192  ae_state *_state);
7194  /* Real */ ae_matrix* a,
7195  ae_int_t* nvars,
7196  ae_int_t* nclasses,
7197  ae_state *_state);
7198 void mnlpack(/* Real */ ae_matrix* a,
7199  ae_int_t nvars,
7200  ae_int_t nclasses,
7201  logitmodel* lm,
7202  ae_state *_state);
7203 void mnlcopy(logitmodel* lm1, logitmodel* lm2, ae_state *_state);
7205  /* Real */ ae_matrix* xy,
7206  ae_int_t npoints,
7207  ae_state *_state);
7209  /* Real */ ae_matrix* xy,
7210  ae_int_t npoints,
7211  ae_state *_state);
7213  /* Real */ ae_matrix* xy,
7214  ae_int_t npoints,
7215  ae_state *_state);
7217  /* Real */ ae_matrix* xy,
7218  ae_int_t npoints,
7219  ae_state *_state);
7221  /* Real */ ae_matrix* xy,
7222  ae_int_t ssize,
7223  ae_state *_state);
7225  /* Real */ ae_matrix* xy,
7226  ae_int_t npoints,
7227  ae_state *_state);
7228 void _logitmodel_init(void* _p, ae_state *_state);
7229 void _logitmodel_init_copy(void* _dst, void* _src, ae_state *_state);
7230 void _logitmodel_clear(void* _p);
7231 void _logitmodel_destroy(void* _p);
7232 void _logitmcstate_init(void* _p, ae_state *_state);
7233 void _logitmcstate_init_copy(void* _dst, void* _src, ae_state *_state);
7234 void _logitmcstate_clear(void* _p);
7235 void _logitmcstate_destroy(void* _p);
7236 void _mnlreport_init(void* _p, ae_state *_state);
7237 void _mnlreport_init_copy(void* _dst, void* _src, ae_state *_state);
7238 void _mnlreport_clear(void* _p);
7239 void _mnlreport_destroy(void* _p);
7240 void mcpdcreate(ae_int_t n, mcpdstate* s, ae_state *_state);
7242  ae_int_t entrystate,
7243  mcpdstate* s,
7244  ae_state *_state);
7246  ae_int_t exitstate,
7247  mcpdstate* s,
7248  ae_state *_state);
7250  ae_int_t entrystate,
7251  ae_int_t exitstate,
7252  mcpdstate* s,
7253  ae_state *_state);
7255  /* Real */ ae_matrix* xy,
7256  ae_int_t k,
7257  ae_state *_state);
7259  /* Real */ ae_matrix* ec,
7260  ae_state *_state);
7262  ae_int_t i,
7263  ae_int_t j,
7264  double c,
7265  ae_state *_state);
7267  /* Real */ ae_matrix* bndl,
7268  /* Real */ ae_matrix* bndu,
7269  ae_state *_state);
7271  ae_int_t i,
7272  ae_int_t j,
7273  double bndl,
7274  double bndu,
7275  ae_state *_state);
7277  /* Real */ ae_matrix* c,
7278  /* Integer */ ae_vector* ct,
7279  ae_int_t k,
7280  ae_state *_state);
7281 void mcpdsettikhonovregularizer(mcpdstate* s, double v, ae_state *_state);
7283  /* Real */ ae_matrix* pp,
7284  ae_state *_state);
7286  /* Real */ ae_vector* pw,
7287  ae_state *_state);
7288 void mcpdsolve(mcpdstate* s, ae_state *_state);
7290  /* Real */ ae_matrix* p,
7291  mcpdreport* rep,
7292  ae_state *_state);
7293 void _mcpdstate_init(void* _p, ae_state *_state);
7294 void _mcpdstate_init_copy(void* _dst, void* _src, ae_state *_state);
7295 void _mcpdstate_clear(void* _p);
7296 void _mcpdstate_destroy(void* _p);
7297 void _mcpdreport_init(void* _p, ae_state *_state);
7298 void _mcpdreport_init_copy(void* _dst, void* _src, ae_state *_state);
7299 void _mcpdreport_clear(void* _p);
7300 void _mcpdreport_destroy(void* _p);
7302  ae_int_t nout,
7303  ae_int_t ensemblesize,
7304  mlpensemble* ensemble,
7305  ae_state *_state);
7307  ae_int_t nhid,
7308  ae_int_t nout,
7309  ae_int_t ensemblesize,
7310  mlpensemble* ensemble,
7311  ae_state *_state);
7313  ae_int_t nhid1,
7314  ae_int_t nhid2,
7315  ae_int_t nout,
7316  ae_int_t ensemblesize,
7317  mlpensemble* ensemble,
7318  ae_state *_state);
7320  ae_int_t nout,
7321  double b,
7322  double d,
7323  ae_int_t ensemblesize,
7324  mlpensemble* ensemble,
7325  ae_state *_state);
7327  ae_int_t nhid,
7328  ae_int_t nout,
7329  double b,
7330  double d,
7331  ae_int_t ensemblesize,
7332  mlpensemble* ensemble,
7333  ae_state *_state);
7335  ae_int_t nhid1,
7336  ae_int_t nhid2,
7337  ae_int_t nout,
7338  double b,
7339  double d,
7340  ae_int_t ensemblesize,
7341  mlpensemble* ensemble,
7342  ae_state *_state);
7344  ae_int_t nout,
7345  double a,
7346  double b,
7347  ae_int_t ensemblesize,
7348  mlpensemble* ensemble,
7349  ae_state *_state);
7351  ae_int_t nhid,
7352  ae_int_t nout,
7353  double a,
7354  double b,
7355  ae_int_t ensemblesize,
7356  mlpensemble* ensemble,
7357  ae_state *_state);
7359  ae_int_t nhid1,
7360  ae_int_t nhid2,
7361  ae_int_t nout,
7362  double a,
7363  double b,
7364  ae_int_t ensemblesize,
7365  mlpensemble* ensemble,
7366  ae_state *_state);
7368  ae_int_t nout,
7369  ae_int_t ensemblesize,
7370  mlpensemble* ensemble,
7371  ae_state *_state);
7373  ae_int_t nhid,
7374  ae_int_t nout,
7375  ae_int_t ensemblesize,
7376  mlpensemble* ensemble,
7377  ae_state *_state);
7379  ae_int_t nhid1,
7380  ae_int_t nhid2,
7381  ae_int_t nout,
7382  ae_int_t ensemblesize,
7383  mlpensemble* ensemble,
7384  ae_state *_state);
7386  ae_int_t ensemblesize,
7387  mlpensemble* ensemble,
7388  ae_state *_state);
7389 void mlpecopy(mlpensemble* ensemble1,
7390  mlpensemble* ensemble2,
7391  ae_state *_state);
7392 void mlperandomize(mlpensemble* ensemble, ae_state *_state);
7394  ae_int_t* nin,
7395  ae_int_t* nout,
7396  ae_state *_state);
7398 void mlpeprocess(mlpensemble* ensemble,
7399  /* Real */ ae_vector* x,
7400  /* Real */ ae_vector* y,
7401  ae_state *_state);
7402 void mlpeprocessi(mlpensemble* ensemble,
7403  /* Real */ ae_vector* x,
7404  /* Real */ ae_vector* y,
7405  ae_state *_state);
7407  /* Real */ ae_matrix* densexy,
7408  sparsematrix* sparsexy,
7409  ae_int_t datasetsize,
7410  ae_int_t datasettype,
7411  /* Integer */ ae_vector* idx,
7412  ae_int_t subset0,
7413  ae_int_t subset1,
7414  ae_int_t subsettype,
7415  ae_shared_pool* buf,
7416  modelerrors* rep,
7417  ae_state *_state);
7419  sparsematrix* xy,
7420  ae_int_t npoints,
7421  double* relcls,
7422  double* avgce,
7423  double* rms,
7424  double* avg,
7425  double* avgrel,
7426  ae_state *_state);
7427 double mlperelclserror(mlpensemble* ensemble,
7428  /* Real */ ae_matrix* xy,
7429  ae_int_t npoints,
7430  ae_state *_state);
7431 double mlpeavgce(mlpensemble* ensemble,
7432  /* Real */ ae_matrix* xy,
7433  ae_int_t npoints,
7434  ae_state *_state);
7435 double mlpermserror(mlpensemble* ensemble,
7436  /* Real */ ae_matrix* xy,
7437  ae_int_t npoints,
7438  ae_state *_state);
7439 double mlpeavgerror(mlpensemble* ensemble,
7440  /* Real */ ae_matrix* xy,
7441  ae_int_t npoints,
7442  ae_state *_state);
7443 double mlpeavgrelerror(mlpensemble* ensemble,
7444  /* Real */ ae_matrix* xy,
7445  ae_int_t npoints,
7446  ae_state *_state);
7447 void mlpealloc(ae_serializer* s, mlpensemble* ensemble, ae_state *_state);
7448 void mlpeserialize(ae_serializer* s,
7449  mlpensemble* ensemble,
7450  ae_state *_state);
7451 void mlpeunserialize(ae_serializer* s,
7452  mlpensemble* ensemble,
7453  ae_state *_state);
7454 void _mlpensemble_init(void* _p, ae_state *_state);
7455 void _mlpensemble_init_copy(void* _dst, void* _src, ae_state *_state);
7456 void _mlpensemble_clear(void* _p);
7457 void _mlpensemble_destroy(void* _p);
7458 void mlptrainlm(multilayerperceptron* network,
7459  /* Real */ ae_matrix* xy,
7460  ae_int_t npoints,
7461  double decay,
7462  ae_int_t restarts,
7463  ae_int_t* info,
7464  mlpreport* rep,
7465  ae_state *_state);
7466 void mlptrainlbfgs(multilayerperceptron* network,
7467  /* Real */ ae_matrix* xy,
7468  ae_int_t npoints,
7469  double decay,
7470  ae_int_t restarts,
7471  double wstep,
7472  ae_int_t maxits,
7473  ae_int_t* info,
7474  mlpreport* rep,
7475  ae_state *_state);
7476 void mlptraines(multilayerperceptron* network,
7477  /* Real */ ae_matrix* trnxy,
7478  ae_int_t trnsize,
7479  /* Real */ ae_matrix* valxy,
7480  ae_int_t valsize,
7481  double decay,
7482  ae_int_t restarts,
7483  ae_int_t* info,
7484  mlpreport* rep,
7485  ae_state *_state);
7486 void mlpkfoldcvlbfgs(multilayerperceptron* network,
7487  /* Real */ ae_matrix* xy,
7488  ae_int_t npoints,
7489  double decay,
7490  ae_int_t restarts,
7491  double wstep,
7492  ae_int_t maxits,
7493  ae_int_t foldscount,
7494  ae_int_t* info,
7495  mlpreport* rep,
7496  mlpcvreport* cvrep,
7497  ae_state *_state);
7498 void mlpkfoldcvlm(multilayerperceptron* network,
7499  /* Real */ ae_matrix* xy,
7500  ae_int_t npoints,
7501  double decay,
7502  ae_int_t restarts,
7503  ae_int_t foldscount,
7504  ae_int_t* info,
7505  mlpreport* rep,
7506  mlpcvreport* cvrep,
7507  ae_state *_state);
7508 void mlpkfoldcv(mlptrainer* s,
7509  multilayerperceptron* network,
7510  ae_int_t nrestarts,
7511  ae_int_t foldscount,
7512  mlpreport* rep,
7513  ae_state *_state);
7514 void _pexec_mlpkfoldcv(mlptrainer* s,
7515  multilayerperceptron* network,
7516  ae_int_t nrestarts,
7517  ae_int_t foldscount,
7518  mlpreport* rep, ae_state *_state);
7519 void mlpcreatetrainer(ae_int_t nin,
7520  ae_int_t nout,
7521  mlptrainer* s,
7522  ae_state *_state);
7523 void mlpcreatetrainercls(ae_int_t nin,
7524  ae_int_t nclasses,
7525  mlptrainer* s,
7526  ae_state *_state);
7527 void mlpsetdataset(mlptrainer* s,
7528  /* Real */ ae_matrix* xy,
7529  ae_int_t npoints,
7530  ae_state *_state);
7531 void mlpsetsparsedataset(mlptrainer* s,
7532  sparsematrix* xy,
7533  ae_int_t npoints,
7534  ae_state *_state);
7535 void mlpsetdecay(mlptrainer* s, double decay, ae_state *_state);
7536 void mlpsetcond(mlptrainer* s,
7537  double wstep,
7538  ae_int_t maxits,
7539  ae_state *_state);
7540 void mlpsetalgobatch(mlptrainer* s, ae_state *_state);
7541 void mlptrainnetwork(mlptrainer* s,
7542  multilayerperceptron* network,
7543  ae_int_t nrestarts,
7544  mlpreport* rep,
7545  ae_state *_state);
7546 void _pexec_mlptrainnetwork(mlptrainer* s,
7547  multilayerperceptron* network,
7548  ae_int_t nrestarts,
7549  mlpreport* rep, ae_state *_state);
7550 void mlpstarttraining(mlptrainer* s,
7551  multilayerperceptron* network,
7552  ae_bool randomstart,
7553  ae_state *_state);
7554 ae_bool mlpcontinuetraining(mlptrainer* s,
7555  multilayerperceptron* network,
7556  ae_state *_state);
7557 ae_bool _pexec_mlpcontinuetraining(mlptrainer* s,
7558  multilayerperceptron* network, ae_state *_state);
7559 void mlpebagginglm(mlpensemble* ensemble,
7560  /* Real */ ae_matrix* xy,
7561  ae_int_t npoints,
7562  double decay,
7563  ae_int_t restarts,
7564  ae_int_t* info,
7565  mlpreport* rep,
7566  mlpcvreport* ooberrors,
7567  ae_state *_state);
7568 void mlpebagginglbfgs(mlpensemble* ensemble,
7569  /* Real */ ae_matrix* xy,
7570  ae_int_t npoints,
7571  double decay,
7572  ae_int_t restarts,
7573  double wstep,
7574  ae_int_t maxits,
7575  ae_int_t* info,
7576  mlpreport* rep,
7577  mlpcvreport* ooberrors,
7578  ae_state *_state);
7579 void mlpetraines(mlpensemble* ensemble,
7580  /* Real */ ae_matrix* xy,
7581  ae_int_t npoints,
7582  double decay,
7583  ae_int_t restarts,
7584  ae_int_t* info,
7585  mlpreport* rep,
7586  ae_state *_state);
7587 void mlptrainensemblees(mlptrainer* s,
7588  mlpensemble* ensemble,
7589  ae_int_t nrestarts,
7590  mlpreport* rep,
7591  ae_state *_state);
7592 void _pexec_mlptrainensemblees(mlptrainer* s,
7593  mlpensemble* ensemble,
7594  ae_int_t nrestarts,
7595  mlpreport* rep, ae_state *_state);
7596 void _mlpreport_init(void* _p, ae_state *_state);
7597 void _mlpreport_init_copy(void* _dst, void* _src, ae_state *_state);
7598 void _mlpreport_clear(void* _p);
7599 void _mlpreport_destroy(void* _p);
7600 void _mlpcvreport_init(void* _p, ae_state *_state);
7601 void _mlpcvreport_init_copy(void* _dst, void* _src, ae_state *_state);
7602 void _mlpcvreport_clear(void* _p);
7603 void _mlpcvreport_destroy(void* _p);
7604 void _smlptrnsession_init(void* _p, ae_state *_state);
7605 void _smlptrnsession_init_copy(void* _dst, void* _src, ae_state *_state);
7606 void _smlptrnsession_clear(void* _p);
7607 void _smlptrnsession_destroy(void* _p);
7608 void _mlpetrnsession_init(void* _p, ae_state *_state);
7609 void _mlpetrnsession_init_copy(void* _dst, void* _src, ae_state *_state);
7610 void _mlpetrnsession_clear(void* _p);
7611 void _mlpetrnsession_destroy(void* _p);
7612 void _mlptrainer_init(void* _p, ae_state *_state);
7613 void _mlptrainer_init_copy(void* _dst, void* _src, ae_state *_state);
7614 void _mlptrainer_clear(void* _p);
7615 void _mlptrainer_destroy(void* _p);
7616 void _mlpparallelizationcv_init(void* _p, ae_state *_state);
7617 void _mlpparallelizationcv_init_copy(void* _dst, void* _src, ae_state *_state);
7618 void _mlpparallelizationcv_clear(void* _p);
7619 void _mlpparallelizationcv_destroy(void* _p);
7620 void clusterizercreate(clusterizerstate* s, ae_state *_state);
7621 void clusterizersetpoints(clusterizerstate* s,
7622  /* Real */ ae_matrix* xy,
7623  ae_int_t npoints,
7624  ae_int_t nfeatures,
7625  ae_int_t disttype,
7626  ae_state *_state);
7627 void clusterizersetdistances(clusterizerstate* s,
7628  /* Real */ ae_matrix* d,
7629  ae_int_t npoints,
7630  ae_bool isupper,
7631  ae_state *_state);
7632 void clusterizersetahcalgo(clusterizerstate* s,
7633  ae_int_t algo,
7634  ae_state *_state);
7635 void clusterizersetkmeanslimits(clusterizerstate* s,
7636  ae_int_t restarts,
7637  ae_int_t maxits,
7638  ae_state *_state);
7639 void clusterizersetkmeansinit(clusterizerstate* s,
7640  ae_int_t initalgo,
7641  ae_state *_state);
7642 void clusterizerrunahc(clusterizerstate* s,
7643  ahcreport* rep,
7644  ae_state *_state);
7645 void _pexec_clusterizerrunahc(clusterizerstate* s,
7646  ahcreport* rep, ae_state *_state);
7647 void clusterizerrunkmeans(clusterizerstate* s,
7648  ae_int_t k,
7649  kmeansreport* rep,
7650  ae_state *_state);
7651 void _pexec_clusterizerrunkmeans(clusterizerstate* s,
7652  ae_int_t k,
7653  kmeansreport* rep, ae_state *_state);
7654 void clusterizergetdistances(/* Real */ ae_matrix* xy,
7655  ae_int_t npoints,
7656  ae_int_t nfeatures,
7657  ae_int_t disttype,
7658  /* Real */ ae_matrix* d,
7659  ae_state *_state);
7660 void _pexec_clusterizergetdistances(/* Real */ ae_matrix* xy,
7661  ae_int_t npoints,
7662  ae_int_t nfeatures,
7663  ae_int_t disttype,
7664  /* Real */ ae_matrix* d, ae_state *_state);
7665 void clusterizergetdistancesbuf(apbuffers* buf,
7666  /* Real */ ae_matrix* xy,
7667  ae_int_t npoints,
7668  ae_int_t nfeatures,
7669  ae_int_t disttype,
7670  /* Real */ ae_matrix* d,
7671  ae_state *_state);
7672 void clusterizergetkclusters(ahcreport* rep,
7673  ae_int_t k,
7674  /* Integer */ ae_vector* cidx,
7675  /* Integer */ ae_vector* cz,
7676  ae_state *_state);
7677 void clusterizerseparatedbydist(ahcreport* rep,
7678  double r,
7679  ae_int_t* k,
7680  /* Integer */ ae_vector* cidx,
7681  /* Integer */ ae_vector* cz,
7682  ae_state *_state);
7683 void clusterizerseparatedbycorr(ahcreport* rep,
7684  double r,
7685  ae_int_t* k,
7686  /* Integer */ ae_vector* cidx,
7687  /* Integer */ ae_vector* cz,
7688  ae_state *_state);
7689 void kmeansinitbuf(kmeansbuffers* buf, ae_state *_state);
7690 void kmeansgenerateinternal(/* Real */ ae_matrix* xy,
7691  ae_int_t npoints,
7692  ae_int_t nvars,
7693  ae_int_t k,
7694  ae_int_t initalgo,
7695  ae_int_t maxits,
7696  ae_int_t restarts,
7697  ae_bool kmeansdbgnoits,
7698  ae_int_t* info,
7699  ae_int_t* iterationscount,
7700  /* Real */ ae_matrix* ccol,
7701  ae_bool needccol,
7702  /* Real */ ae_matrix* crow,
7703  ae_bool needcrow,
7704  /* Integer */ ae_vector* xyc,
7705  double* energy,
7706  kmeansbuffers* buf,
7707  ae_state *_state);
7708 void kmeansupdatedistances(/* Real */ ae_matrix* xy,
7709  ae_int_t idx0,
7710  ae_int_t idx1,
7711  ae_int_t nvars,
7712  /* Real */ ae_matrix* ct,
7713  ae_int_t cidx0,
7714  ae_int_t cidx1,
7715  /* Integer */ ae_vector* xyc,
7716  /* Real */ ae_vector* xydist2,
7717  ae_shared_pool* bufferpool,
7718  ae_state *_state);
7719 void _kmeansbuffers_init(void* _p, ae_state *_state);
7720 void _kmeansbuffers_init_copy(void* _dst, void* _src, ae_state *_state);
7721 void _kmeansbuffers_clear(void* _p);
7722 void _kmeansbuffers_destroy(void* _p);
7723 void _clusterizerstate_init(void* _p, ae_state *_state);
7724 void _clusterizerstate_init_copy(void* _dst, void* _src, ae_state *_state);
7725 void _clusterizerstate_clear(void* _p);
7726 void _clusterizerstate_destroy(void* _p);
7727 void _ahcreport_init(void* _p, ae_state *_state);
7728 void _ahcreport_init_copy(void* _dst, void* _src, ae_state *_state);
7729 void _ahcreport_clear(void* _p);
7730 void _ahcreport_destroy(void* _p);
7731 void _kmeansreport_init(void* _p, ae_state *_state);
7732 void _kmeansreport_init_copy(void* _dst, void* _src, ae_state *_state);
7733 void _kmeansreport_clear(void* _p);
7734 void _kmeansreport_destroy(void* _p);
7735 void dfbuildrandomdecisionforest(/* Real */ ae_matrix* xy,
7736  ae_int_t npoints,
7737  ae_int_t nvars,
7738  ae_int_t nclasses,
7739  ae_int_t ntrees,
7740  double r,
7741  ae_int_t* info,
7742  decisionforest* df,
7743  dfreport* rep,
7744  ae_state *_state);
7745 void dfbuildrandomdecisionforestx1(/* Real */ ae_matrix* xy,
7746  ae_int_t npoints,
7747  ae_int_t nvars,
7748  ae_int_t nclasses,
7749  ae_int_t ntrees,
7750  ae_int_t nrndvars,
7751  double r,
7752  ae_int_t* info,
7753  decisionforest* df,
7754  dfreport* rep,
7755  ae_state *_state);
7756 void dfbuildinternal(/* Real */ ae_matrix* xy,
7757  ae_int_t npoints,
7758  ae_int_t nvars,
7759  ae_int_t nclasses,
7760  ae_int_t ntrees,
7761  ae_int_t samplesize,
7762  ae_int_t nfeatures,
7763  ae_int_t flags,
7764  ae_int_t* info,
7765  decisionforest* df,
7766  dfreport* rep,
7767  ae_state *_state);
7768 void dfprocess(decisionforest* df,
7769  /* Real */ ae_vector* x,
7770  /* Real */ ae_vector* y,
7771  ae_state *_state);
7772 void dfprocessi(decisionforest* df,
7773  /* Real */ ae_vector* x,
7774  /* Real */ ae_vector* y,
7775  ae_state *_state);
7776 double dfrelclserror(decisionforest* df,
7777  /* Real */ ae_matrix* xy,
7778  ae_int_t npoints,
7779  ae_state *_state);
7780 double dfavgce(decisionforest* df,
7781  /* Real */ ae_matrix* xy,
7782  ae_int_t npoints,
7783  ae_state *_state);
7784 double dfrmserror(decisionforest* df,
7785  /* Real */ ae_matrix* xy,
7786  ae_int_t npoints,
7787  ae_state *_state);
7788 double dfavgerror(decisionforest* df,
7789  /* Real */ ae_matrix* xy,
7790  ae_int_t npoints,
7791  ae_state *_state);
7792 double dfavgrelerror(decisionforest* df,
7793  /* Real */ ae_matrix* xy,
7794  ae_int_t npoints,
7795  ae_state *_state);
7796 void dfcopy(decisionforest* df1, decisionforest* df2, ae_state *_state);
7797 void dfalloc(ae_serializer* s, decisionforest* forest, ae_state *_state);
7798 void dfserialize(ae_serializer* s,
7799  decisionforest* forest,
7800  ae_state *_state);
7801 void dfunserialize(ae_serializer* s,
7802  decisionforest* forest,
7803  ae_state *_state);
7804 void _decisionforest_init(void* _p, ae_state *_state);
7805 void _decisionforest_init_copy(void* _dst, void* _src, ae_state *_state);
7806 void _decisionforest_clear(void* _p);
7807 void _decisionforest_destroy(void* _p);
7808 void _dfreport_init(void* _p, ae_state *_state);
7809 void _dfreport_init_copy(void* _dst, void* _src, ae_state *_state);
7810 void _dfreport_clear(void* _p);
7811 void _dfreport_destroy(void* _p);
7812 void _dfinternalbuffers_init(void* _p, ae_state *_state);
7813 void _dfinternalbuffers_init_copy(void* _dst, void* _src, ae_state *_state);
7814 void _dfinternalbuffers_clear(void* _p);
7815 void _dfinternalbuffers_destroy(void* _p);
7816 void kmeansgenerate(/* Real */ ae_matrix* xy,
7817  ae_int_t npoints,
7818  ae_int_t nvars,
7819  ae_int_t k,
7820  ae_int_t restarts,
7821  ae_int_t* info,
7822  /* Real */ ae_matrix* c,
7823  /* Integer */ ae_vector* xyc,
7824  ae_state *_state);
7825 
7826 }
7827 #endif
7828 
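The mlpecreate0/1/2 declarations above (together with their B, R and C variants for bounded, ranged and classifier outputs) construct ensembles of identically shaped networks, mlpetraines trains them with early stopping, and mlpeprocess averages the member outputs. A minimal sketch of the same workflow through the public C++ wrappers (mlpecreate1 and mlpetraines as declared in this header); the XOR-style dataset and all parameter values are illustrative, and the alglib::mlpeprocess wrapper is assumed to mirror the internal mlpeprocess declaration above:

    #include <cstdio>
    #include "dataanalysis.h"

    using namespace alglib;

    int main()
    {
        // Ensemble of 10 networks, each with architecture 2-5-1.
        mlpensemble ens;
        mlpecreate1(2, 5, 1, 10, ens);

        // Early-stopping training: each member is fitted on its own random
        // train/validation split of the dataset (columns: x0, x1, y).
        real_2d_array xy = "[[0,0,0],[0,1,1],[1,0,1],[1,1,0]]";
        ae_int_t info;
        mlpreport rep;
        mlpetraines(ens, xy, 4, 0.001, 3, info, rep);   // info>0 on success

        // Ensemble output is the average of the member outputs.
        real_1d_array x = "[0,1]", y;
        mlpeprocess(ens, x, y);
        printf("%.2f\n", double(y[0]));
        return 0;
    }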
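The mlptrainer block (mlpcreatetrainer, mlpsetdataset/mlpsetsparsedataset, mlpsetdecay, mlpsetcond, mlptrainnetwork) separates training configuration from the network object, so the same trainer can drive several networks or restarts. A minimal regression sketch using the public C++ wrappers declared in this header; the toy dataset and parameter values are illustrative assumptions:

    #include <cstdio>
    #include "dataanalysis.h"

    using namespace alglib;

    int main()
    {
        // Trainer for a 2-input, 1-output regression problem;
        // dataset columns are [x0, x1, y].
        real_2d_array xy = "[[0,0,0],[0,1,1],[1,0,1],[1,1,0]]";
        mlptrainer trn;
        mlpcreatetrainer(2, 1, trn);
        mlpsetdataset(trn, xy, 4);
        mlpsetdecay(trn, 0.001);        // weight-decay regularization
        mlpsetcond(trn, 0.01, 0);       // stop when the step is below 0.01

        // Network with one hidden layer of 5 neurons, trained
        // with 3 random restarts, keeping the best solution.
        multilayerperceptron net;
        mlpcreate1(2, 5, 1, net);
        mlpreport rep;
        mlptrainnetwork(trn, net, 3, rep);

        // Evaluate the trained network at one point.
        real_1d_array x = "[1,0]", y;
        mlpprocessi(net, x, y);
        printf("%.2f\n", double(y[0]));
        return 0;
    }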
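In the clustering block, clusterizercreate and clusterizersetpoints load a dataset (clusterizersetdistances accepts a precomputed distance matrix instead); clusterizerrunahc builds the full agglomerative dendrogram, which clusterizergetkclusters can cut at any k, while clusterizerrunkmeans runs k-means directly on the same state. A sketch with five illustrative 2-d points, using the public C++ wrappers; disttype=2 selects Euclidean distance, the only metric k-means accepts:

    #include <cstdio>
    #include "dataanalysis.h"

    using namespace alglib;

    int main()
    {
        // Five 2-d points forming two loose groups.
        real_2d_array xy = "[[1,1],[1,2],[4,1],[2,1],[4,1.5]]";

        clusterizerstate s;
        clusterizercreate(s);
        clusterizersetpoints(s, xy, 5, 2, 2);   // 5 points, 2 features, Euclidean

        // Hierarchical clustering, then cut the dendrogram into k=2 clusters.
        ahcreport ahc;
        clusterizerrunahc(s, ahc);
        integer_1d_array cidx, cz;
        clusterizergetkclusters(ahc, 2, cidx, cz);
        printf("AHC labels:     %s\n", cidx.tostring().c_str());

        // k-means (k=2) on the same state; label numbering may differ.
        kmeansreport km;
        clusterizerrunkmeans(s, 2, km);
        printf("k-means labels: %s\n", km.cidx.tostring().c_str());
        return 0;
    }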
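dfbuildrandomdecisionforest grows ntrees trees, each trained on a random fraction r of the dataset (bagging); for classification problems (nclasses>=2) the last dataset column holds the class index, and dfprocess returns per-class posterior probabilities. A toy two-class sketch through the public C++ wrappers; the dataset and parameter values are illustrative assumptions:

    #include <cstdio>
    #include "dataanalysis.h"

    using namespace alglib;

    int main()
    {
        // Two-class toy set, columns: [x0, x1, class index].
        real_2d_array xy = "[[0,0,0],[0,1,0],[1,0,1],[1,1,1]]";

        // 50 trees, each trained on r=50% of the points.
        ae_int_t info;
        decisionforest df;
        dfreport rep;
        dfbuildrandomdecisionforest(xy, 4, 2, 2, 50, 0.5, info, df, rep);
        // info>0 on success; rep carries training-set and out-of-bag errors.

        // Posterior probabilities for one query point.
        real_1d_array x = "[1,0]", y;
        dfprocess(df, x, y);
        printf("P(class0)=%.2f  P(class1)=%.2f\n", double(y[0]), double(y[1]));
        return 0;
    }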
void mcpdaddbc(const mcpdstate &s, const ae_int_t i, const ae_int_t j, const double bndl, const double bndu)
void _logitmodel_destroy(void *_p)
void mlpinitpreprocessorsparsesubset(multilayerperceptron *network, sparsematrix *xy, ae_int_t setsize, ae_vector *idx, ae_int_t subsetsize, ae_state *_state)
void mlpecreatec0(const ae_int_t nin, const ae_int_t nout, const ae_int_t ensemblesize, mlpensemble &ensemble)
void _cvreport_init_copy(void *_dst, void *_src, ae_state *_state)
void dserrallocate(ae_int_t nclasses, ae_vector *buf, ae_state *_state)
void mlpsetinputscaling(const multilayerperceptron &network, const ae_int_t i, const double mean, const double sigma)
void mlpetraines(mlpensemble *ensemble, ae_matrix *xy, ae_int_t npoints, double decay, ae_int_t restarts, ae_int_t *info, mlpreport *rep, ae_state *_state)
void _mnlreport_init(void *_p, ae_state *_state)
void _ahcreport_destroy(void *_p)
ae_int_t & iterationscount
Definition: dataanalysis.h:987
void mcpdcreateexit(const ae_int_t n, const ae_int_t exitstate, mcpdstate &s)
double smp_mlperrorsparsesubset(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize)
double smp_mlperrorsubset(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize)
double lrprocess(const linearmodel &lm, const real_1d_array &x)
double mnlrelclserror(const logitmodel &lm, const real_2d_array &xy, const ae_int_t npoints)
void mlpkfoldcvlm(multilayerperceptron *network, ae_matrix *xy, ae_int_t npoints, double decay, ae_int_t restarts, ae_int_t foldscount, ae_int_t *info, mlpreport *rep, mlpcvreport *cvrep, ae_state *_state)
double _pexec_mlpavgcesparse(multilayerperceptron *network, sparsematrix *xy, ae_int_t npoints, ae_state *_state)
void _dfreport_destroy(void *_p)
void mlpcreater1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, const double a, const double b, multilayerperceptron &network)
void mcpdaddec(const mcpdstate &s, const ae_int_t i, const ae_int_t j, const double c)
void dsoptimalsplitk(ae_vector *a, ae_vector *c, ae_int_t n, ae_int_t nc, ae_int_t kmax, ae_int_t *info, ae_vector *thresholds, ae_int_t *ni, double *cve, ae_state *_state)
_clusterizerstate_owner & operator=(const _clusterizerstate_owner &rhs)
void smp_clusterizergetdistances(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nfeatures, const ae_int_t disttype, real_2d_array &d)
void _kmeansreport_init(void *_p, ae_state *_state)
virtual ~mlpreport()
void mnltrainh(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nclasses, ae_int_t &info, logitmodel &lm, mnlreport &rep)
void mlpeserialize(mlpensemble &obj, std::string &s_out)
void mlpalloc(ae_serializer *s, multilayerperceptron *network, ae_state *_state)
void smp_mlpgradbatchsubset(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t setsize, const integer_1d_array &idx, const ae_int_t subsetsize, double &e, real_1d_array &grad)
double smp_mlpavgerror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints)
void _pexec_pcatruncatedsubspace(ae_matrix *x, ae_int_t npoints, ae_int_t nvars, ae_int_t nneeded, double eps, ae_int_t maxits, ae_vector *s2, ae_matrix *v, ae_state *_state)
void mnlpack(const real_2d_array &a, const ae_int_t nvars, const ae_int_t nclasses, logitmodel &lm)
void mlpstarttraining(const mlptrainer &s, const multilayerperceptron &network, const bool randomstart)
void _mlpcvreport_destroy(void *_p)
void mlpallerrorssparsesubset(multilayerperceptron *network, sparsematrix *xy, ae_int_t setsize, ae_vector *subset, ae_int_t subsetsize, modelerrors *rep, ae_state *_state)
ae_int_t mlpgetlayersize(multilayerperceptron *network, ae_int_t k, ae_state *_state)
ae_int_t & nhess
Definition: dataanalysis.h:558
dfreport & operator=(const dfreport &rhs)
void mlpsetinputscaling(multilayerperceptron *network, ae_int_t i, double mean, double sigma, ae_state *_state)
double & cvavgrelerror
Definition: dataanalysis.h:499
bool mlpeissoftmax(const mlpensemble &ensemble)
void mlpprocess(const multilayerperceptron &network, const real_1d_array &x, real_1d_array &y)
ae_int_t & outeriterationscount
Definition: dataanalysis.h:628
double mlprelclserrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints)
integer_1d_array p
Definition: dataanalysis.h:920
void lrunpack(const linearmodel &lm, real_1d_array &v, ae_int_t &nvars)
void lrpack(const real_1d_array &v, const ae_int_t nvars, linearmodel &lm)
void mlpkfoldcv(const mlptrainer &s, const multilayerperceptron &network, const ae_int_t nrestarts, const ae_int_t foldscount, mlpreport &rep)
double _pexec_mlperrorsparse(multilayerperceptron *network, sparsematrix *xy, ae_int_t npoints, ae_state *_state)
alglib_impl::modelerrors * p_struct
Definition: dataanalysis.h:398
virtual ~mcpdreport()
modelerrors & operator=(const modelerrors &rhs)
double _pexec_mlprelclserror(multilayerperceptron *network, ae_matrix *xy, ae_int_t npoints, ae_state *_state)
void _decisionforest_clear(void *_p)
ae_bool mlpeissoftmax(mlpensemble *ensemble, ae_state *_state)
void mlpallerrorsx(multilayerperceptron *network, ae_matrix *densexy, sparsematrix *sparsexy, ae_int_t datasetsize, ae_int_t datasettype, ae_vector *idx, ae_int_t subset0, ae_int_t subset1, ae_int_t subsettype, ae_shared_pool *buf, modelerrors *rep, ae_state *_state)
void smp_fisherldan(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nclasses, ae_int_t &info, real_2d_array &w)
void mlpecreater2(ae_int_t nin, ae_int_t nhid1, ae_int_t nhid2, ae_int_t nout, double a, double b, ae_int_t ensemblesize, mlpensemble *ensemble, ae_state *_state)
void _mcpdstate_init_copy(void *_dst, void *_src, ae_state *_state)
integer_2d_array pz
Definition: dataanalysis.h:922
void lrbuild(ae_matrix *xy, ae_int_t npoints, ae_int_t nvars, ae_int_t *info, linearmodel *lm, lrreport *ar, ae_state *_state)
_dfreport_owner & operator=(const _dfreport_owner &rhs)
void mlpexporttunableparameters(multilayerperceptron *network, ae_vector *p, ae_int_t *pcount, ae_state *_state)
alglib_impl::linearmodel * p_struct
Definition: dataanalysis.h:446
double dfavgce(decisionforest *df, ae_matrix *xy, ae_int_t npoints, ae_state *_state)
double mlperelclserror(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints)
void _dfinternalbuffers_init(void *_p, ae_state *_state)
double dfavgerror(decisionforest *df, ae_matrix *xy, ae_int_t npoints, ae_state *_state)
void mcpdsetlc(const mcpdstate &s, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k)
void kmeansgenerate(ae_matrix *xy, ae_int_t npoints, ae_int_t nvars, ae_int_t k, ae_int_t restarts, ae_int_t *info, ae_matrix *c, ae_vector *xyc, ae_state *_state)
virtual ~mcpdstate()
double mlperrorsparsesubset(multilayerperceptron *network, sparsematrix *xy, ae_int_t setsize, ae_vector *subset, ae_int_t subsetsize, ae_state *_state)
void dsoptimalsplit2(const real_1d_array &a, const integer_1d_array &c, const ae_int_t n, ae_int_t &info, double &threshold, double &pal, double &pbl, double &par, double &pbr, double &cve)
ae_int_t mlpgetoutputscount(multilayerperceptron *network, ae_state *_state)
alglib_impl::mlpreport * c_ptr()
double mlpeavgce(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints)
void mlpeprocessi(mlpensemble *ensemble, ae_vector *x, ae_vector *y, ae_state *_state)
alglib_impl::mnlreport * p_struct
Definition: dataanalysis.h:552
void mlpecreatec1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, const ae_int_t ensemblesize, mlpensemble &ensemble)
double mnlavgrelerror(const logitmodel &lm, const real_2d_array &xy, const ae_int_t ssize)
void _pexec_clusterizergetdistances(ae_matrix *xy, ae_int_t npoints, ae_int_t nfeatures, ae_int_t disttype, ae_matrix *d, ae_state *_state)
double mlperrorn(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize)
void mlpsetalgobatch(mlptrainer *s, ae_state *_state)
double smp_mlpavgrelerror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints)
void mnlcopy(logitmodel *lm1, logitmodel *lm2, ae_state *_state)
void _smlpgrad_init(void *_p, ae_state *_state)
void mnlprocess(const logitmodel &lm, const real_1d_array &x, real_1d_array &y)
void _mcpdreport_destroy(void *_p)
void mlpecreateb2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, const double b, const double d, const ae_int_t ensemblesize, mlpensemble &ensemble)
void mcpdsetbc(const mcpdstate &s, const real_2d_array &bndl, const real_2d_array &bndu)
double mlpavgrelerrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints)
void mlprandomize(const multilayerperceptron &network)
void mcpdaddbc(mcpdstate *s, ae_int_t i, ae_int_t j, double bndl, double bndu, ae_state *_state)
decisionforest & operator=(const decisionforest &rhs)
void mlpcreatec2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, multilayerperceptron &network)
double mlpgetweight(const multilayerperceptron &network, const ae_int_t k0, const ae_int_t i0, const ae_int_t k1, const ae_int_t i1)
void _mlptrainer_clear(void *_p)
ae_bool mlpissoftmax(multilayerperceptron *network, ae_state *_state)
void _pexec_mlpallerrorssparsesubset(multilayerperceptron *network, sparsematrix *xy, ae_int_t setsize, ae_vector *subset, ae_int_t subsetsize, modelerrors *rep, ae_state *_state)
void fisherlda(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nclasses, ae_int_t &info, real_1d_array &w)
ptrdiff_t ae_int_t
Definition: ap.h:185
void mlpeprocessi(const mlpensemble &ensemble, const real_1d_array &x, real_1d_array &y)
double mnlrmserror(logitmodel *lm, ae_matrix *xy, ae_int_t npoints, ae_state *_state)
double smp_mlpavgrelerrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints)
void mlphessiannbatch(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize, double &e, real_1d_array &grad, real_2d_array &h)
double _pexec_mlperror(multilayerperceptron *network, ae_matrix *xy, ae_int_t npoints, ae_state *_state)
void smp_pcabuildbasis(const real_2d_array &x, const ae_int_t npoints, const ae_int_t nvars, ae_int_t &info, real_1d_array &s2, real_2d_array &v)
void mlpinitpreprocessor(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize)
void mnlpack(ae_matrix *a, ae_int_t nvars, ae_int_t nclasses, logitmodel *lm, ae_state *_state)
void lrunpack(linearmodel *lm, ae_vector *v, ae_int_t *nvars, ae_state *_state)
void mlpproperties(const multilayerperceptron &network, ae_int_t &nin, ae_int_t &nout, ae_int_t &wcount)
void mlpecreater1(ae_int_t nin, ae_int_t nhid, ae_int_t nout, double a, double b, ae_int_t ensemblesize, mlpensemble *ensemble, ae_state *_state)
void mlptraines(const multilayerperceptron &network, const real_2d_array &trnxy, const ae_int_t trnsize, const real_2d_array &valxy, const ae_int_t valsize, const double decay, const ae_int_t restarts, ae_int_t &info, mlpreport &rep)
void mnltrainh(ae_matrix *xy, ae_int_t npoints, ae_int_t nvars, ae_int_t nclasses, ae_int_t *info, logitmodel *lm, mnlreport *rep, ae_state *_state)
ae_int_t mlpntotal(multilayerperceptron *network, ae_state *_state)
double mlpavgcesparse(multilayerperceptron *network, sparsematrix *xy, ae_int_t npoints, ae_state *_state)
void dfprocess(decisionforest *df, ae_vector *x, ae_vector *y, ae_state *_state)
void mlpkfoldcv(mlptrainer *s, multilayerperceptron *network, ae_int_t nrestarts, ae_int_t foldscount, mlpreport *rep, ae_state *_state)
lrreport(const lrreport &rhs)
void mcpdaddtrack(mcpdstate *s, ae_matrix *xy, ae_int_t k, ae_state *_state)
void lrpack(ae_vector *v, ae_int_t nvars, linearmodel *lm, ae_state *_state)
void mlpcreate2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, multilayerperceptron &network)
double _pexec_mlpavgerror(multilayerperceptron *network, ae_matrix *xy, ae_int_t npoints, ae_state *_state)
void mlpeprocess(const mlpensemble &ensemble, const real_1d_array &x, real_1d_array &y)
virtual ~modelerrors()
void kmeansupdatedistances(ae_matrix *xy, ae_int_t idx0, ae_int_t idx1, ae_int_t nvars, ae_matrix *ct, ae_int_t cidx0, ae_int_t cidx1, ae_vector *xyc, ae_vector *xydist2, ae_shared_pool *bufferpool, ae_state *_state)
void mlpeallerrorsx(mlpensemble *ensemble, ae_matrix *densexy, sparsematrix *sparsexy, ae_int_t datasetsize, ae_int_t datasettype, ae_vector *idx, ae_int_t subset0, ae_int_t subset1, ae_int_t subsettype, ae_shared_pool *buf, modelerrors *rep, ae_state *_state)
void mlpgetinputscaling(const multilayerperceptron &network, const ae_int_t i, double &mean, double &sigma)
void _pexec_mlpgradbatchsparsesubset(multilayerperceptron *network, sparsematrix *xy, ae_int_t setsize, ae_vector *idx, ae_int_t subsetsize, double *e, ae_vector *grad, ae_state *_state)
virtual ~linearmodel()
ae_int_t & terminationtype
Definition: dataanalysis.h:918
void mlpecreatec2(ae_int_t nin, ae_int_t nhid1, ae_int_t nhid2, ae_int_t nout, ae_int_t ensemblesize, mlpensemble *ensemble, ae_state *_state)
virtual ~dfreport()
integer_1d_array cvdefects
Definition: dataanalysis.h:501
void _mnlreport_destroy(void *_p)
virtual ~ahcreport()
void mlptrainlbfgs(multilayerperceptron *network, ae_matrix *xy, ae_int_t npoints, double decay, ae_int_t restarts, double wstep, ae_int_t maxits, ae_int_t *info, mlpreport *rep, ae_state *_state)
void clusterizerrunkmeans(clusterizerstate *s, ae_int_t k, kmeansreport *rep, ae_state *_state)
alglib_impl::mnlreport * c_ptr()
void kmeansgenerate(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t k, const ae_int_t restarts, ae_int_t &info, real_2d_array &c, integer_1d_array &xyc)
alglib_impl::multilayerperceptron * c_ptr()
void _logitmcstate_destroy(void *_p)
void _mcpdreport_init_copy(void *_dst, void *_src, ae_state *_state)
ae_int_t mlpgetweightscount(const multilayerperceptron &network)
Page URL: http://wiki.math.ethz.ch/bin/view/Concepts/WebHome
21 August 2020
© 2020 Eidgenössische Technische Hochschule Zürich