optimization.h

Go to the documentation of this file.
1 /*************************************************************************
2 ALGLIB 3.11.0 (source code generated 2017-05-11)
3 Copyright (c) Sergey Bochkanov (ALGLIB project).
4 
5 >>> SOURCE LICENSE >>>
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation (www.fsf.org); either version 2 of the
9 License, or (at your option) any later version.
10 
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15 
16 A copy of the GNU General Public License is available at
17 http://www.fsf.org/licensing/licenses
18 >>> END OF LICENSE >>>
19 *************************************************************************/
20 #ifndef _optimization_pkg_h
21 #define _optimization_pkg_h
22 #include "ap.h"
23 #include "alglibinternal.h"
24 #include "alglibmisc.h"
25 #include "linalg.h"
26 #include "solvers.h"
27 
29 //
30 // THIS SECTION CONTAINS COMPUTATIONAL CORE DECLARATIONS (DATATYPES)
31 //
33 namespace alglib_impl
34 {
35 typedef struct
36 {
37  ae_int_t n;
38  ae_int_t k;
39  double alpha;
40  double tau;
41  double theta;
42  ae_matrix a;
43  ae_matrix q;
44  ae_vector b;
45  ae_vector r;
46  ae_vector xc;
47  ae_vector d;
48  ae_vector activeset;
49  ae_matrix tq2dense;
50  ae_matrix tk2;
51  ae_vector tq2diag;
52  ae_vector tq1;
53  ae_vector tk1;
54  double tq0;
55  double tk0;
56  ae_vector txc;
57  ae_vector tb;
58  ae_int_t nfree;
59  ae_int_t ecakind;
60  ae_matrix ecadense;
61  ae_matrix eq;
62  ae_matrix eccm;
63  ae_vector ecadiag;
64  ae_vector eb;
65  double ec;
66  ae_vector tmp0;
67  ae_vector tmp1;
68  ae_vector tmpg;
69  ae_matrix tmp2;
70  ae_bool ismaintermchanged;
71  ae_bool issecondarytermchanged;
72  ae_bool islineartermchanged;
73  ae_bool isactivesetchanged;
75 typedef struct
76 {
77  ae_vector norms;
78  ae_vector alpha;
79  ae_vector rho;
81  ae_vector idx;
82  ae_vector bufa;
83  ae_vector bufb;
85 typedef struct
86 {
91  ae_vector bufc;
92  ae_matrix bufz;
93  ae_matrix bufw;
94  ae_vector tmp;
96 typedef struct
97 {
101  ae_matrix densea;
104  double debugflops;
105  ae_int_t debugmaxinnerits;
108  ae_matrix tmpca;
109  ae_matrix tmplq;
110  ae_matrix trda;
111  ae_vector trdd;
113  ae_vector g;
115  ae_vector dx;
116  ae_vector diagaa;
119  ae_vector cborg;
120  ae_vector tmpcholesky;
122  ae_vector regdiag;
123  ae_vector tmp0;
124  ae_vector tmp1;
125  ae_vector tmp2;
126  ae_vector rdtmprowmap;
128 typedef struct
129 {
131  ae_int_t algostate;
133  ae_bool hasxc;
136  ae_vector activeset;
137  ae_bool basisisready;
138  ae_matrix sbasis;
139  ae_matrix pbasis;
140  ae_matrix ibasis;
141  ae_int_t basissize;
142  ae_bool feasinitpt;
143  ae_bool constraintschanged;
144  ae_vector hasbndl;
145  ae_vector hasbndu;
146  ae_vector bndl;
147  ae_vector bndu;
148  ae_matrix cleic;
149  ae_int_t nec;
150  ae_int_t nic;
152  ae_vector mtas;
153  ae_vector cdtmp;
154  ae_vector corrtmp;
155  ae_vector unitdiagonal;
156  snnlssolver solver;
157  ae_vector scntmp;
158  ae_vector tmp0;
159  ae_vector tmpfeas;
160  ae_matrix tmpm0;
161  ae_vector rctmps;
162  ae_vector rctmpg;
163  ae_vector rctmprightpart;
164  ae_matrix rctmpdense0;
165  ae_matrix rctmpdense1;
166  ae_vector rctmpisequality;
167  ae_vector rctmpconstraintidx;
168  ae_vector rctmplambdas;
169  ae_matrix tmpbasis;
171 typedef struct
172 {
173  double epsg;
174  double epsf;
175  double epsx;
176  ae_int_t maxouterits;
177  ae_bool cgphase;
178  ae_bool cnphase;
179  ae_int_t cgminits;
180  ae_int_t cgmaxits;
181  ae_int_t cnmaxupdates;
182  ae_int_t sparsesolver;
184 typedef struct
185 {
187  ae_int_t nmain;
188  ae_int_t nslack;
189  ae_int_t nec;
190  ae_int_t nic;
191  ae_int_t akind;
192  ae_matrix densea;
193  sparsematrix sparsea;
194  ae_bool sparseupper;
195  double absamax;
196  double absasum;
197  double absasum2;
199  ae_vector bndl;
200  ae_vector bndu;
201  ae_vector havebndl;
202  ae_vector havebndu;
203  ae_matrix cleic;
213  ae_vector activated;
214  ae_int_t nfree;
215  ae_int_t cnmodelage;
216  ae_matrix densez;
217  sparsematrix sparsecca;
218  ae_vector yidx;
219  ae_vector regdiag;
220  ae_vector regx0;
221  ae_vector tmpcn;
222  ae_vector tmpcni;
223  ae_vector tmpcnb;
224  ae_vector tmp0;
225  ae_vector stpbuf;
227  ae_int_t repinneriterationscount;
228  ae_int_t repouteriterationscount;
229  ae_int_t repncholesky;
230  ae_int_t repncupdates;
232 typedef struct
233 {
236  double epsg;
237  double epsf;
238  double epsx;
239  ae_int_t maxits;
240  ae_bool xrep;
241  double stpmax;
243  double diffstep;
244  ae_int_t nfev;
245  ae_int_t mcstage;
249  ae_vector rho;
251  ae_matrix sk;
253  ae_vector theta;
255  double stp;
256  ae_vector work;
257  double fold;
258  double trimthreshold;
259  ae_int_t prectype;
260  double gammak;
261  ae_matrix denseh;
262  ae_vector diagh;
263  ae_vector precc;
264  ae_vector precd;
265  ae_matrix precw;
266  ae_int_t preck;
267  precbuflbfgs precbuf;
268  precbuflowrank lowrankbuf;
269  double fbase;
270  double fm2;
271  double fm1;
272  double fp1;
273  double fp2;
274  ae_vector autobuf;
276  double f;
278  ae_bool needf;
279  ae_bool needfg;
280  ae_bool xupdated;
281  ae_bool userterminationneeded;
282  double teststep;
283  rcommstate rstate;
284  ae_int_t repiterationscount;
285  ae_int_t repnfev;
286  ae_int_t repvaridx;
287  ae_int_t repterminationtype;
288  linminstate lstate;
290 typedef struct
291 {
292  ae_int_t iterationscount;
293  ae_int_t nfev;
294  ae_int_t varidx;
295  ae_int_t terminationtype;
297 typedef struct
298 {
299  double epsx;
300  ae_int_t outerits;
301  double rho;
303 typedef struct
304 {
305  ae_vector nulc;
306  ae_matrix sclsfta;
307  ae_vector sclsftb;
308  ae_vector sclsfthasbndl;
309  ae_vector sclsfthasbndu;
310  ae_vector sclsftbndl;
311  ae_vector sclsftbndu;
312  ae_vector sclsftxc;
313  ae_matrix sclsftcleic;
314  ae_matrix exa;
316  ae_vector exxc;
317  ae_vector exxn;
318  ae_vector exbndl;
319  ae_vector exbndu;
320  ae_vector exscale;
321  ae_vector exxorigin;
322  qqpsettings qqpsettingsuser;
323  qqpbuffers qqpbuf;
324  ae_vector nulcest;
325  ae_vector tmp0;
326  ae_matrix tmp2;
327  ae_vector modelg;
329  ae_vector deltax;
331  sparsematrix dummysparse;
332  ae_matrix qrkkt;
333  ae_vector qrrightpart;
334  ae_vector qrtau;
335  ae_vector qrsv0;
336  ae_vector qrsvx1;
337  ae_int_t repinneriterationscount;
338  ae_int_t repouteriterationscount;
339  ae_int_t repncholesky;
340  ae_int_t repnmv;
342 typedef struct
343 {
344  double epsg;
345  double epsf;
346  double epsx;
347  ae_int_t maxits;
349 typedef struct
350 {
356  ae_vector workbndl;
357  ae_vector workbndu;
358  ae_vector havebndl;
359  ae_vector havebndu;
360  ae_matrix workcleic;
361  ae_vector rctmpg;
362  ae_vector tmp0;
363  ae_vector tmp1;
364  ae_vector tmpb;
365  ae_int_t repinneriterationscount;
366  ae_int_t repouteriterationscount;
367  ae_int_t repncholesky;
369 typedef struct
370 {
372  double epsg;
373  double epsf;
374  double epsx;
375  ae_int_t maxits;
376  double stpmax;
377  double suggestedstep;
378  ae_bool xrep;
379  ae_bool drep;
380  ae_int_t cgtype;
381  ae_int_t prectype;
382  ae_vector diagh;
383  ae_vector diaghl2;
384  ae_matrix vcorr;
385  ae_int_t vcnt;
386  ae_vector s;
387  double diffstep;
388  ae_int_t nfev;
389  ae_int_t mcstage;
396  double fold;
397  double stp;
398  double curstpmax;
400  double lastgoodstep;
401  double lastscaledstep;
402  ae_int_t mcinfo;
403  ae_bool innerresetneeded;
404  ae_bool terminationneeded;
405  double trimthreshold;
406  ae_int_t rstimer;
408  double f;
410  ae_bool needf;
411  ae_bool needfg;
412  ae_bool xupdated;
413  ae_bool algpowerup;
414  ae_bool lsstart;
415  ae_bool lsend;
416  ae_bool userterminationneeded;
417  double teststep;
418  rcommstate rstate;
419  ae_int_t repiterationscount;
420  ae_int_t repnfev;
421  ae_int_t repvaridx;
422  ae_int_t repterminationtype;
423  ae_int_t debugrestartscount;
424  linminstate lstate;
425  double fbase;
426  double fm2;
427  double fm1;
428  double fp1;
429  double fp2;
430  double betahs;
431  double betady;
432  ae_vector work0;
433  ae_vector work1;
435 typedef struct
436 {
437  ae_int_t iterationscount;
438  ae_int_t nfev;
439  ae_int_t varidx;
440  ae_int_t terminationtype;
442 typedef struct
443 {
444  ae_int_t nmain;
445  ae_int_t nslack;
446  double epsg;
447  double epsf;
448  double epsx;
449  ae_int_t maxits;
450  ae_bool xrep;
451  ae_bool drep;
452  double stpmax;
453  double diffstep;
454  sactiveset sas;
456  ae_int_t prectype;
457  ae_vector diagh;
459  double f;
461  ae_bool needf;
462  ae_bool needfg;
463  ae_bool xupdated;
464  ae_bool lsstart;
465  ae_bool steepestdescentstep;
466  ae_bool boundedstep;
467  ae_bool userterminationneeded;
468  double teststep;
469  rcommstate rstate;
476  double fc;
477  double fn;
478  double fp;
480  ae_matrix cleic;
481  ae_int_t nec;
482  ae_int_t nic;
483  double lastgoodstep;
484  double lastscaledgoodstep;
485  double maxscaledgrad;
486  ae_vector hasbndl;
487  ae_vector hasbndu;
488  ae_vector bndl;
489  ae_vector bndu;
490  ae_int_t repinneriterationscount;
491  ae_int_t repouteriterationscount;
492  ae_int_t repnfev;
493  ae_int_t repvaridx;
494  ae_int_t repterminationtype;
495  double repdebugeqerr;
496  double repdebugfs;
497  double repdebugff;
498  double repdebugdx;
499  ae_int_t repdebugfeasqpits;
500  ae_int_t repdebugfeasgpaits;
501  ae_vector xstart;
502  snnlssolver solver;
503  double fbase;
504  double fm2;
505  double fm1;
506  double fp1;
507  double fp2;
508  double xm1;
509  double xp1;
510  double gm1;
511  double gp1;
512  ae_int_t cidx;
513  double cval;
514  ae_vector tmpprec;
515  ae_vector tmp0;
516  ae_int_t nfev;
517  ae_int_t mcstage;
518  double stp;
519  double curstpmax;
520  double activationstep;
521  ae_vector work;
522  linminstate lstate;
523  double trimthreshold;
524  ae_int_t nonmonotoniccnt;
525  ae_matrix bufyk;
526  ae_matrix bufsk;
527  ae_vector bufrho;
528  ae_vector buftheta;
529  ae_int_t bufsize;
531 typedef struct
532 {
533  ae_int_t iterationscount;
535  ae_int_t varidx;
536  ae_int_t terminationtype;
537  double debugeqerr;
538  double debugfs;
539  double debugff;
540  double debugdx;
541  ae_int_t debugfeasqpits;
542  ae_int_t debugfeasgpaits;
543  ae_int_t inneriterationscount;
544  ae_int_t outeriterationscount;
546 typedef struct
547 {
548  double epsg;
549  double epsf;
550  double epsx;
551  ae_int_t maxits;
553 typedef struct
554 {
556  minbleicreport solverrep;
557  ae_vector tmp0;
558  ae_vector tmp1;
559  ae_vector tmpi;
560  ae_int_t repinneriterationscount;
561  ae_int_t repouteriterationscount;
563 typedef struct
564 {
565  ae_int_t n;
566  qqpsettings qqpsettingsuser;
567  qpbleicsettings qpbleicsettingsuser;
568  qpdenseaulsettings qpdenseaulsettingsuser;
569  ae_bool dbgskipconstraintnormalization;
570  ae_int_t algokind;
571  ae_int_t akind;
573  sparsematrix sparsea;
574  ae_bool sparseaupper;
575  double absamax;
576  double absasum;
577  double absasum2;
579  ae_vector bndl;
580  ae_vector bndu;
582  ae_vector havebndl;
583  ae_vector havebndu;
584  ae_vector xorigin;
585  ae_vector startx;
586  ae_bool havex;
587  ae_matrix cleic;
588  ae_int_t nec;
589  ae_int_t nic;
590  sparsematrix scleic;
591  ae_int_t snec;
592  ae_int_t snic;
594  ae_int_t repinneriterationscount;
595  ae_int_t repouteriterationscount;
596  ae_int_t repncholesky;
597  ae_int_t repnmv;
598  ae_int_t repterminationtype;
599  ae_vector tmp0;
600  ae_matrix ecleic;
601  ae_matrix dummyr2;
602  ae_bool qpbleicfirstcall;
603  qpbleicbuffers qpbleicbuf;
604  qqpbuffers qqpbuf;
605  qpdenseaulbuffers qpdenseaulbuf;
606  qpcholeskybuffers qpcholeskybuf;
608 typedef struct
609 {
610  ae_int_t inneriterationscount;
611  ae_int_t outeriterationscount;
612  ae_int_t nmv;
613  ae_int_t ncholesky;
614  ae_int_t terminationtype;
616 typedef struct
617 {
618  double stabilizingpoint;
619  double initialinequalitymultiplier;
620  ae_int_t solvertype;
621  ae_int_t prectype;
622  ae_int_t updatefreq;
623  double rho;
625  double epsg;
626  double epsf;
627  double epsx;
628  ae_int_t maxits;
629  ae_int_t aulitscnt;
630  ae_bool xrep;
631  double stpmax;
632  double diffstep;
633  double teststep;
635  ae_vector bndl;
636  ae_vector bndu;
637  ae_vector hasbndl;
638  ae_vector hasbndu;
639  ae_int_t nec;
640  ae_int_t nic;
641  ae_matrix cleic;
645  double f;
648  ae_bool needfij;
649  ae_bool needfi;
650  ae_bool xupdated;
651  rcommstate rstate;
652  rcommstate rstateaul;
653  ae_vector scaledbndl;
654  ae_vector scaledbndu;
655  ae_matrix scaledcleic;
657  ae_vector xstart;
658  ae_vector xbase;
659  ae_vector fbase;
660  ae_vector dfbase;
665  ae_vector dfm1;
666  ae_vector dfp1;
667  ae_vector bufd;
668  ae_vector bufc;
669  ae_vector tmp0;
670  ae_matrix bufw;
671  ae_matrix bufz;
676  double gammak;
677  ae_bool xkpresent;
678  minlbfgsstate auloptimizer;
679  minlbfgsreport aulreport;
680  ae_vector nubc;
681  ae_vector nulc;
682  ae_vector nunlc;
683  ae_int_t repinneriterationscount;
684  ae_int_t repouteriterationscount;
685  ae_int_t repnfev;
686  ae_int_t repvaridx;
687  ae_int_t repfuncidx;
688  ae_int_t repterminationtype;
689  ae_int_t repdbgphase0its;
691 typedef struct
692 {
693  ae_int_t iterationscount;
694  ae_int_t nfev;
695  ae_int_t varidx;
696  ae_int_t funcidx;
697  ae_int_t terminationtype;
698  ae_int_t dbgphase0its;
700 typedef struct
701 {
702  ae_int_t nmain;
703  double epsg;
704  double epsf;
705  double epsx;
706  ae_int_t maxits;
707  ae_bool xrep;
708  double stpmax;
709  double diffstep;
710  ae_vector s;
711  ae_int_t prectype;
712  ae_vector diagh;
714  double f;
716  ae_bool needf;
717  ae_bool needfg;
718  ae_bool xupdated;
719  ae_bool userterminationneeded;
720  double teststep;
721  rcommstate rstate;
729  double fc;
730  double fn;
731  double fp;
733  double lastscaledgoodstep;
734  ae_vector hasbndl;
735  ae_vector hasbndu;
736  ae_vector bndl;
737  ae_vector bndu;
738  ae_int_t repiterationscount;
739  ae_int_t repnfev;
740  ae_int_t repvaridx;
741  ae_int_t repterminationtype;
742  ae_vector xstart;
743  snnlssolver solver;
744  double fbase;
745  double fm2;
746  double fm1;
747  double fp1;
748  double fp2;
749  double xm1;
750  double xp1;
751  double gm1;
752  double gp1;
753  ae_vector tmpprec;
754  ae_vector tmp0;
755  ae_int_t nfev;
756  ae_int_t mcstage;
757  double stp;
758  double curstpmax;
759  ae_vector work;
760  linminstate lstate;
761  double trimthreshold;
762  ae_int_t nonmonotoniccnt;
763  ae_matrix bufyk;
764  ae_matrix bufsk;
765  ae_vector bufrho;
766  ae_vector buftheta;
767  ae_int_t bufsize;
769 typedef struct
770 {
771  ae_int_t iterationscount;
772  ae_int_t nfev;
773  ae_int_t varidx;
774  ae_int_t terminationtype;
776 typedef struct
777 {
778  double fc;
779  double fn;
786  ae_matrix ch;
788  ae_vector invutc;
789  ae_vector tmp0;
790  ae_vector tmpidx;
791  ae_vector tmpd;
792  ae_vector tmpc;
793  ae_vector tmplambdas;
794  ae_matrix tmpc2;
795  ae_vector tmpb;
798 typedef struct
799 {
800  ae_int_t solvertype;
802  double epsx;
803  ae_int_t maxits;
804  ae_bool xrep;
805  double diffstep;
807  ae_vector bndl;
808  ae_vector bndu;
809  ae_vector hasbndl;
810  ae_vector hasbndu;
811  ae_int_t nec;
812  ae_int_t nic;
813  ae_matrix cleic;
815  ae_int_t nh;
817  double f;
820  ae_bool needfij;
821  ae_bool needfi;
822  ae_bool xupdated;
823  rcommstate rstate;
824  rcommstate rstateags;
825  hqrndstate agsrs;
826  double agsradius;
827  ae_int_t agssamplesize;
828  double agsraddecay;
829  double agsalphadecay;
830  double agsdecrease;
831  double agsinitstp;
832  double agsstattold;
833  double agsshortstpabs;
834  double agsshortstprel;
835  double agsshortf;
836  ae_int_t agsshortlimit;
837  double agsrhononlinear;
838  ae_int_t agsminupdate;
839  ae_int_t agsmaxraddecays;
840  ae_int_t agsmaxbacktrack;
841  ae_int_t agsmaxbacktracknonfull;
842  double agspenaltylevel;
843  double agspenaltyincrease;
844  ae_vector xstart;
849  ae_vector colmax;
850  ae_vector diagh;
851  ae_vector signmin;
852  ae_vector signmax;
853  ae_bool userterminationneeded;
854  ae_vector scaledbndl;
855  ae_vector scaledbndu;
856  ae_matrix scaledcleic;
857  ae_vector rholinear;
858  ae_matrix samplex;
859  ae_matrix samplegm;
860  ae_matrix samplegmbc;
861  ae_vector samplef;
862  ae_vector samplef0;
863  minnsqp nsqp;
864  ae_vector tmp0;
865  ae_vector tmp1;
866  ae_matrix tmp2;
867  ae_vector tmp3;
868  ae_vector xbase;
871  ae_int_t repinneriterationscount;
872  ae_int_t repouteriterationscount;
873  ae_int_t repnfev;
874  ae_int_t repvaridx;
875  ae_int_t repfuncidx;
876  ae_int_t repterminationtype;
877  double replcerr;
878  double repnlcerr;
879  ae_int_t dbgncholesky;
881 typedef struct
882 {
883  ae_int_t iterationscount;
884  ae_int_t nfev;
885  double cerr;
886  double lcerr;
887  double nlcerr;
888  ae_int_t terminationtype;
889  ae_int_t varidx;
890  ae_int_t funcidx;
892 typedef struct
893 {
895  double epsg;
896  double epsf;
897  double epsx;
898  ae_int_t maxits;
899  ae_bool xrep;
900  double stpmax;
901  ae_int_t cgtype;
903  ae_int_t nfev;
904  ae_int_t mcstage;
905  ae_vector bndl;
906  ae_vector bndu;
907  ae_int_t curalgo;
908  ae_int_t acount;
909  double mu;
910  double finit;
911  double dginit;
919  double fold;
920  double stp;
921  ae_vector work;
924  double laststep;
926  double f;
928  ae_bool needfg;
929  ae_bool xupdated;
930  rcommstate rstate;
931  ae_int_t repiterationscount;
932  ae_int_t repnfev;
933  ae_int_t repterminationtype;
934  ae_int_t debugrestartscount;
935  linminstate lstate;
936  double betahs;
937  double betady;
939 typedef struct
940 {
941  ae_int_t iterationscount;
942  ae_int_t nfev;
943  ae_int_t terminationtype;
944  ae_int_t activeconstraints;
946 typedef struct
947 {
950  double stpmax;
951  ae_int_t modelage;
952  ae_int_t maxmodelage;
953  ae_bool hasfi;
954  double epsx;
956  double f;
958  ae_bool needf;
959  ae_bool needfi;
960  double fbase;
961  ae_vector modeldiag;
962  ae_vector xbase;
963  ae_vector fibase;
964  ae_vector bndl;
965  ae_vector bndu;
966  ae_vector havebndl;
967  ae_vector havebndu;
969  rcommstate rstate;
970  ae_vector xdir;
971  ae_vector choleskybuf;
972  ae_vector tmp0;
973  ae_vector tmpct;
974  double actualdecrease;
975  double predicteddecrease;
976  minqpstate qpstate;
977  minqpreport qprep;
980 typedef struct
981 {
984  double diffstep;
985  double epsx;
986  ae_int_t maxits;
987  ae_bool xrep;
988  double stpmax;
989  ae_int_t maxmodelage;
990  ae_bool makeadditers;
992  double f;
997  ae_bool needf;
998  ae_bool needfg;
999  ae_bool needfgh;
1000  ae_bool needfij;
1001  ae_bool needfi;
1002  ae_bool xupdated;
1003  ae_bool userterminationneeded;
1004  ae_int_t algomode;
1005  ae_bool hasf;
1006  ae_bool hasfi;
1007  ae_bool hasg;
1008  ae_vector xbase;
1009  double fbase;
1010  ae_vector fibase;
1011  ae_vector gbase;
1012  ae_matrix quadraticmodel;
1015  ae_vector havebndl;
1016  ae_vector havebndu;
1018  ae_matrix cleic;
1021  double lambdav;
1022  double nu;
1023  ae_int_t modelage;
1026  ae_vector deltax;
1027  ae_vector deltaf;
1028  ae_bool deltaxready;
1029  ae_bool deltafready;
1030  double teststep;
1031  ae_int_t repiterationscount;
1032  ae_int_t repterminationtype;
1033  ae_int_t repfuncidx;
1034  ae_int_t repvaridx;
1035  ae_int_t repnfunc;
1036  ae_int_t repnjac;
1037  ae_int_t repngrad;
1038  ae_int_t repnhess;
1039  ae_int_t repncholesky;
1040  rcommstate rstate;
1041  ae_vector choleskybuf;
1043  double actualdecrease;
1044  double predicteddecrease;
1045  double xm1;
1046  double xp1;
1053  minlbfgsstate internalstate;
1054  minlbfgsreport internalrep;
1055  minqpstate qpstate;
1057  minlmstepfinder finderstate;
1059 typedef struct
1061  ae_int_t iterationscount;
1062  ae_int_t terminationtype;
1063  ae_int_t funcidx;
1064  ae_int_t varidx;
1065  ae_int_t nfunc;
1066  ae_int_t njac;
1067  ae_int_t ngrad;
1068  ae_int_t nhess;
1069  ae_int_t ncholesky;
1075 //
1076 // THIS SECTION CONTAINS C++ INTERFACE
1077 //
1079 namespace alglib
1088 
1089 
1090 
1091 
1092 /*************************************************************************
1093 
1094 *************************************************************************/
1096 {
1097 public:
1104 protected:
1106 };
1108 {
1109 public:
1113  virtual ~minlbfgsstate();
1117  double &f;
1120 
1121 };
1122 
1123 
1124 /*************************************************************************
1125 This structure stores optimization report:
1126 * IterationsCount total number of inner iterations
1127 * NFEV number of gradient evaluations
1128 * TerminationType termination type (see below)
1129 
1130 TERMINATION CODES
1131 
1132 TerminationType field contains completion code, which can be:
1133  -8 internal integrity control detected infinite or NAN values in
1134  function/gradient. Abnormal termination signalled.
1135  -7 gradient verification failed.
1136  See MinLBFGSSetGradientCheck() for more information.
1137  1 relative function improvement is no more than EpsF.
1138  2 relative step is no more than EpsX.
1139  4 gradient norm is no more than EpsG
1140  5 MaxIts steps was taken
1141  7 stopping conditions are too stringent,
1142  further improvement is impossible,
1143  X contains best point found so far.
1144  8 terminated by user who called minlbfgsrequesttermination().
1145  X contains point which was "current accepted" when termination
1146  request was submitted.
1147 
1148 Other fields of this structure are not documented and should not be used!
1149 *************************************************************************/
1151 {
1152 public:
1156  virtual ~_minlbfgsreport_owner();
1159 protected:
1161 };
1163 {
1164 public:
1168  virtual ~minlbfgsreport();
1173 
1174 };
1175 
1176 
1177 
1180 /*************************************************************************
1181 This object stores state of the nonlinear CG optimizer.
1183 You should use ALGLIB functions to work with this object.
1184 *************************************************************************/
1186 {
1187 public:
1191  virtual ~_mincgstate_owner();
1194 protected:
1196 };
1198 {
1199 public:
1201  mincgstate(const mincgstate &rhs);
1203  virtual ~mincgstate();
1207  double &f;
1210 
1211 };
1212 
1213 
1214 /*************************************************************************
1215 This structure stores optimization report:
1216 * IterationsCount total number of inner iterations
1217 * NFEV number of gradient evaluations
1218 * TerminationType termination type (see below)
1219 
1220 TERMINATION CODES
1221 
1222 TerminationType field contains completion code, which can be:
1223  -8 internal integrity control detected infinite or NAN values in
1224  function/gradient. Abnormal termination signalled.
1225  -7 gradient verification failed.
1226  See MinCGSetGradientCheck() for more information.
1227  1 relative function improvement is no more than EpsF.
1228  2 relative step is no more than EpsX.
1229  4 gradient norm is no more than EpsG
1230  5 MaxIts steps was taken
1231  7 stopping conditions are too stringent,
1232  further improvement is impossible,
1233  X contains best point found so far.
1234  8 terminated by user who called mincgrequesttermination(). X contains
1235  point which was "current accepted" when termination request was
1236  submitted.
1237 
1238 Other fields of this structure are not documented and should not be used!
1239 *************************************************************************/
1241 {
1242 public:
1246  virtual ~_mincgreport_owner();
1249 protected:
1251 };
1253 {
1254 public:
1258  virtual ~mincgreport();
1263 
1264 };
1265 
1266 /*************************************************************************
1267 This object stores nonlinear optimizer state.
1268 You should use functions provided by MinBLEIC subpackage to work with this
1269 object
1270 *************************************************************************/
1273 public:
1277  virtual ~_minbleicstate_owner();
1280 protected:
1282 };
1284 {
1285 public:
1289  virtual ~minbleicstate();
1293  double &f;
1296 
1297 };
1298 
1299 
1300 /*************************************************************************
1301 This structure stores optimization report:
1302 * IterationsCount number of iterations
1303 * NFEV number of gradient evaluations
1304 * TerminationType termination type (see below)
1305 
1306 TERMINATION CODES
1307 
1308 TerminationType field contains completion code, which can be:
1309  -8 internal integrity control detected infinite or NAN values in
1310  function/gradient. Abnormal termination signalled.
1311  -7 gradient verification failed.
1312  See MinBLEICSetGradientCheck() for more information.
1313  -3 inconsistent constraints. Feasible point is
1314  either nonexistent or too hard to find. Try to
1315  restart optimizer with better initial approximation
1316  1 relative function improvement is no more than EpsF.
1317  2 relative step is no more than EpsX.
1318  4 gradient norm is no more than EpsG
1319  5 MaxIts steps was taken
1320  7 stopping conditions are too stringent,
1321  further improvement is impossible,
1322  X contains best point found so far.
1323  8 terminated by user who called minbleicrequesttermination(). X contains
1324  point which was "current accepted" when termination request was
1325  submitted.
1326 
1327 ADDITIONAL FIELDS
1328 
1329 There are additional fields which can be used for debugging:
1330 * DebugEqErr error in the equality constraints (2-norm)
1331 * DebugFS f, calculated at projection of initial point
1332  to the feasible set
1333 * DebugFF f, calculated at the final point
1334 * DebugDX |X_start-X_final|
1335 *************************************************************************/
1337 {
1338 public:
1342  virtual ~_minbleicreport_owner();
1345 protected:
1347 };
1349 {
1350 public:
1354  virtual ~minbleicreport();
1359  double &debugeqerr;
1360  double &debugfs;
1361  double &debugff;
1362  double &debugdx;
1367 
1368 };
1369 
1370 
1371 
1372 /*************************************************************************
1373 This object stores nonlinear optimizer state.
1374 You should use functions provided by MinQP subpackage to work with this
1375 object
1376 *************************************************************************/
1379 public:
1383  virtual ~_minqpstate_owner();
1386 protected:
1388 };
1390 {
1391 public:
1393  minqpstate(const minqpstate &rhs);
1395  virtual ~minqpstate();
1396 
1397 };
1398 
1399 
1400 /*************************************************************************
1401 This structure stores optimization report:
1402 * InnerIterationsCount number of inner iterations
1403 * OuterIterationsCount number of outer iterations
1404 * NCholesky number of Cholesky decomposition
1405 * NMV number of matrix-vector products
1406  (only products calculated as part of iterative
1407  process are counted)
1408 * TerminationType completion code (see below)
1409 
1410 Completion codes:
1411 * -5 inappropriate solver was used:
1412  * QuickQP solver for problem with general linear constraints (dense/sparse)
1413 * -4 BLEIC-QP or QuickQP solver found unconstrained direction
1414  of negative curvature (function is unbounded from
1415  below even under constraints), no meaningful
1416  minimum can be found.
1417 * -3 inconsistent constraints (or, maybe, feasible point is
1418  too hard to find). If you are sure that constraints are feasible,
1419  try to restart optimizer with better initial approximation.
1420 * -1 solver error
1421 * 1..4 successful completion
1422 * 5 MaxIts steps was taken
1423 * 7 stopping conditions are too stringent,
1424  further improvement is impossible,
1425  X contains best point found so far.
1426 *************************************************************************/
1428 {
1429 public:
1433  virtual ~_minqpreport_owner();
1436 protected:
1438 };
1440 {
1441 public:
1445  virtual ~minqpreport();
1451 
1452 };
1453 
1454 /*************************************************************************
1455 This object stores nonlinear optimizer state.
1456 You should use functions provided by MinNLC subpackage to work with this
1457 object
1458 *************************************************************************/
1461 public:
1465  virtual ~_minnlcstate_owner();
1468 protected:
1470 };
1472 {
1473 public:
1477  virtual ~minnlcstate();
1481  double &f;
1485 
1486 };
1487 
1488 
1489 /*************************************************************************
1490 This structure stores optimization report:
1491 * IterationsCount total number of inner iterations
1492 * NFEV number of gradient evaluations
1493 * TerminationType termination type (see below)
1494 
1495 TERMINATION CODES
1496 
1497 TerminationType field contains completion code, which can be:
1498  -8 internal integrity control detected infinite or NAN values in
1499  function/gradient. Abnormal termination signalled.
1500  -7 gradient verification failed.
1501  See MinNLCSetGradientCheck() for more information.
1502  1 relative function improvement is no more than EpsF.
1503  2 relative step is no more than EpsX.
1504  4 gradient norm is no more than EpsG
1505  5 MaxIts steps was taken
1506  7 stopping conditions are too stringent,
1507  further improvement is impossible,
1508  X contains best point found so far.
1509 
1510 Other fields of this structure are not documented and should not be used!
1511 *************************************************************************/
1513 {
1514 public:
1518  virtual ~_minnlcreport_owner();
1521 protected:
1523 };
1525 {
1526 public:
1530  virtual ~minnlcreport();
1538 };
1540 /*************************************************************************
1541 This object stores nonlinear optimizer state.
1542 You should use functions provided by MinBC subpackage to work with this
1543 object
1544 *************************************************************************/
1546 {
1547 public:
1551  virtual ~_minbcstate_owner();
1554 protected:
1556 };
1558 {
1559 public:
1561  minbcstate(const minbcstate &rhs);
1563  virtual ~minbcstate();
1567  double &f;
1570 
1571 };
1572 
1573 
1574 /*************************************************************************
1575 This structure stores optimization report:
1576 * IterationsCount number of iterations
1577 * NFEV number of gradient evaluations
1578 * TerminationType termination type (see below)
1579 
1580 TERMINATION CODES
1581 
1582 TerminationType field contains completion code, which can be:
1583  -8 internal integrity control detected infinite or NAN values in
1584  function/gradient. Abnormal termination signalled.
1585  -7 gradient verification failed.
1586  See MinBCSetGradientCheck() for more information.
1587  -3 inconsistent constraints.
1588  1 relative function improvement is no more than EpsF.
1589  2 relative step is no more than EpsX.
1590  4 gradient norm is no more than EpsG
1591  5 MaxIts steps was taken
1592  7 stopping conditions are too stringent,
1593  further improvement is impossible,
1594  X contains best point found so far.
1595  8 terminated by user who called minbcrequesttermination(). X contains
1596  point which was "current accepted" when termination request was
1597  submitted.
1598 
1599 ADDITIONAL FIELDS
1600 
1601 There are additional fields which can be used for debugging:
1602 * DebugEqErr error in the equality constraints (2-norm)
1603 * DebugFS f, calculated at projection of initial point
1604  to the feasible set
1605 * DebugFF f, calculated at the final point
1606 * DebugDX |X_start-X_final|
1607 *************************************************************************/
1609 {
1610 public:
1614  virtual ~_minbcreport_owner();
1617 protected:
1619 };
1621 {
1622 public:
1626  virtual ~minbcreport();
1631 
1632 };
1633 
1634 /*************************************************************************
1635 This object stores nonlinear optimizer state.
1636 You should use functions provided by MinNS subpackage to work with this
1637 object
1638 *************************************************************************/
1640 {
1641 public:
1648 protected:
1650 };
1652 {
1653 public:
1655  minnsstate(const minnsstate &rhs);
1657  virtual ~minnsstate();
1661  double &f;
1665 
1666 };
1667 
1668 
1669 /*************************************************************************
1670 This structure stores optimization report:
1671 * IterationsCount total number of inner iterations
1672 * NFEV number of gradient evaluations
1673 * TerminationType termination type (see below)
1674 * CErr maximum violation of all types of constraints
1675 * LCErr maximum violation of linear constraints
1676 * NLCErr maximum violation of nonlinear constraints
1677 
1678 TERMINATION CODES
1679 
1680 TerminationType field contains completion code, which can be:
1681  -8 internal integrity control detected infinite or NAN values in
1682  function/gradient. Abnormal termination signalled.
1683  -3 box constraints are inconsistent
1684  -1 inconsistent parameters were passed:
1685  * penalty parameter for minnssetalgoags() is zero,
1686  but we have nonlinear constraints set by minnssetnlc()
1687  2 sampling radius decreased below epsx
1688  5 MaxIts steps was taken
1689  7 stopping conditions are too stringent,
1690  further improvement is impossible,
1691  X contains best point found so far.
1692  8 User requested termination via MinNSRequestTermination()
1693 
1694 Other fields of this structure are not documented and should not be used!
1695 *************************************************************************/
1697 {
1698 public:
1702  virtual ~_minnsreport_owner();
1705 protected:
1707 };
1709 {
1710 public:
1714  virtual ~minnsreport();
1717  double &cerr;
1718  double &lcerr;
1719  double &nlcerr;
1723 
1724 };
1726 /*************************************************************************
1728 *************************************************************************/
1731 public:
1738 protected:
1740 };
1742 {
1743 public:
1747  virtual ~minasastate();
1750  double &f;
1753 
1754 };
1755 
1756 
1757 /*************************************************************************
1758 
1759 *************************************************************************/
1761 {
1762 public:
1769 protected:
1771 };
1773 {
1774 public:
1778  virtual ~minasareport();
1783 
1784 };
1785 
1786 /*************************************************************************
1787 Levenberg-Marquardt optimizer.
1788 
1789 This structure should be created using one of the MinLMCreate???()
1790 functions. You should not access its fields directly; use ALGLIB functions
1791 to work with it.
1792 *************************************************************************/
1794 {
1795 public:
1799  virtual ~_minlmstate_owner();
1801  alglib_impl::minlmstate* c_ptr() const;
1802 protected:
1804 };
1807 public:
1809  minlmstate(const minlmstate &rhs);
1811  virtual ~minlmstate();
1818  double &f;
1824 
1825 };
1826 
1827 
1828 /*************************************************************************
1829 Optimization report, filled by MinLMResults() function
1830 
1831 FIELDS:
1832 * TerminationType, completion code:
1833  * -8 optimizer detected NAN/INF values either in the function itself,
1834  or in its Jacobian
1835  * -7 derivative correctness check failed;
1836  see rep.funcidx, rep.varidx for
1837  more information.
1838  * -5 inappropriate solver was used:
1839  * solver created with minlmcreatefgh() used on problem with
1840  general linear constraints (set with minlmsetlc() call).
1841  * -3 constraints are inconsistent
1842  * 2 relative step is no more than EpsX.
1843  * 5 MaxIts steps was taken
1844  * 7 stopping conditions are too stringent,
1845  further improvement is impossible
1846  * 8 terminated by user who called MinLMRequestTermination().
1847  X contains point which was "current accepted" when termination
1848  request was submitted.
1849 * IterationsCount, contains iterations count
1850 * NFunc, number of function calculations
1851 * NJac, number of Jacobian matrix calculations
1852 * NGrad, number of gradient calculations
1853 * NHess, number of Hessian calculations
1854 * NCholesky, number of Cholesky decomposition calculations
1855 *************************************************************************/
1857 {
1858 public:
1862  virtual ~_minlmreport_owner();
1865 protected:
1867 };
1869 {
1870 public:
1874  virtual ~minlmreport();
1884 
1885 };
1892 
1894 
1895 
1896 
1897 /*************************************************************************
1898  LIMITED MEMORY BFGS METHOD FOR LARGE SCALE OPTIMIZATION
1899 
1900 DESCRIPTION:
1901 The subroutine minimizes function F(x) of N arguments by using a quasi-
1902 Newton method (LBFGS scheme) which is optimized to use a minimum amount
1903 of memory.
1904 The subroutine generates the approximation of an inverse Hessian matrix by
1905 using information about the last M steps of the algorithm (instead of N).
1906 It lessens a required amount of memory from a value of order N^2 to a
1907 value of order 2*N*M.
1908 
1909 
1910 REQUIREMENTS:
1911 Algorithm will request following information during its operation:
1912 * function value F and its gradient G (simultaneously) at given point X
1913 
1914 
1915 USAGE:
1916 1. User initializes algorithm state with MinLBFGSCreate() call
1917 2. User tunes solver parameters with MinLBFGSSetCond() MinLBFGSSetStpMax()
1918  and other functions
1919 3. User calls MinLBFGSOptimize() function which takes algorithm state and
1920  pointer (delegate, etc.) to callback function which calculates F/G.
1921 4. User calls MinLBFGSResults() to get solution
1922 5. Optionally user may call MinLBFGSRestartFrom() to solve another problem
1923  with same N/M but another starting point and/or another function.
1924  MinLBFGSRestartFrom() allows to reuse already initialized structure.
1925 
1926 
1927 INPUT PARAMETERS:
1928  N - problem dimension. N>0
1929  M - number of corrections in the BFGS scheme of Hessian
1930  approximation update. Recommended value: 3<=M<=7. The smaller
1931  value causes worse convergence, the bigger will not cause a
1932  considerably better convergence, but will cause a fall in the
1933  performance. M<=N.
1934  X - initial solution approximation, array[0..N-1].
1935 
1936 
1937 OUTPUT PARAMETERS:
1938  State - structure which stores algorithm state
1939 
1940 
1941 NOTES:
1942 1. you may tune stopping conditions with MinLBFGSSetCond() function
1943 2. if target function contains exp() or other fast growing functions, and
1944  optimization algorithm makes too large steps which leads to overflow,
1945  use MinLBFGSSetStpMax() function to bound algorithm's steps. However,
1946  L-BFGS rarely needs such a tuning.
1947 
1948 
1949  -- ALGLIB --
1950  Copyright 02.04.2010 by Bochkanov Sergey
1951 *************************************************************************/
1952 void minlbfgscreate(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlbfgsstate &state);
1953 void minlbfgscreate(const ae_int_t m, const real_1d_array &x, minlbfgsstate &state);
1954 
1955 
1956 /*************************************************************************
1957 The subroutine is finite difference variant of MinLBFGSCreate(). It uses
1958 finite differences in order to differentiate target function.
1959 
1960 Description below contains information which is specific to this function
1961 only. We recommend to read comments on MinLBFGSCreate() in order to get
1962 more information about creation of LBFGS optimizer.
1963 
1964 INPUT PARAMETERS:
1965  N - problem dimension, N>0:
1966  * if given, only leading N elements of X are used
1967  * if not given, automatically determined from size of X
1968  M - number of corrections in the BFGS scheme of Hessian
1969  approximation update. Recommended value: 3<=M<=7. The smaller
1970  value causes worse convergence, the bigger will not cause a
1971  considerably better convergence, but will cause a fall in the
1972  performance. M<=N.
1973  X - starting point, array[0..N-1].
1974  DiffStep- differentiation step, >0
1975 
1976 OUTPUT PARAMETERS:
1977  State - structure which stores algorithm state
1978 
1979 NOTES:
1980 1. algorithm uses 4-point central formula for differentiation.
1981 2. differentiation step along I-th axis is equal to DiffStep*S[I] where
1982  S[] is scaling vector which can be set by MinLBFGSSetScale() call.
1983 3. we recommend you to use moderate values of differentiation step. Too
1984  large step will result in too large truncation errors, while too small
1985  step will result in too large numerical errors. 1.0E-6 can be good
1986  value to start with.
1987 4. Numerical differentiation is very inefficient - one gradient
1988  calculation needs 4*N function evaluations. This function will work for
1989  any N - either small (1...10), moderate (10...100) or large (100...).
1990  However, performance penalty will be too severe for any N's except for
1991  small ones.
1992  We should also say that code which relies on numerical differentiation
1993  is less robust and precise. LBFGS needs exact gradient values.
1994  Imprecise gradient may slow down convergence, especially on highly
1995  nonlinear problems.
1996  Thus we recommend to use this function for fast prototyping on small-
1997  dimensional problems only, and to implement analytical gradient as soon
1998  as possible.
1999 
2000  -- ALGLIB --
2001  Copyright 16.05.2011 by Bochkanov Sergey
2002 *************************************************************************/
2003 void minlbfgscreatef(const ae_int_t n, const ae_int_t m, const real_1d_array &x, const double diffstep, minlbfgsstate &state);
2004 void minlbfgscreatef(const ae_int_t m, const real_1d_array &x, const double diffstep, minlbfgsstate &state);
2005 
2006 
2007 /*************************************************************************
2008 This function sets stopping conditions for L-BFGS optimization algorithm.
2009 
2010 INPUT PARAMETERS:
2011  State - structure which stores algorithm state
2012  EpsG - >=0
2013  The subroutine finishes its work if the condition
2014  |v|<EpsG is satisfied, where:
2015  * |.| means Euclidean norm
2016  * v - scaled gradient vector, v[i]=g[i]*s[i]
2017  * g - gradient
2018  * s - scaling coefficients set by MinLBFGSSetScale()
2019  EpsF - >=0
2020  The subroutine finishes its work if on k+1-th iteration
2021  the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
2022  is satisfied.
2023  EpsX - >=0
2024  The subroutine finishes its work if on k+1-th iteration
2025  the condition |v|<=EpsX is fulfilled, where:
2026  * |.| means Euclidean norm
2027  * v - scaled step vector, v[i]=dx[i]/s[i]
2028  * dx - step vector, dx=X(k+1)-X(k)
2029  * s - scaling coefficients set by MinLBFGSSetScale()
2030  MaxIts - maximum number of iterations. If MaxIts=0, the number of
2031  iterations is unlimited.
2032 
2033 Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to
2034 automatic stopping criterion selection (small EpsX).
2035 
2036  -- ALGLIB --
2037  Copyright 02.04.2010 by Bochkanov Sergey
2038 *************************************************************************/
2039 void minlbfgssetcond(const minlbfgsstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits);
2040 
2041 
2042 /*************************************************************************
2043 This function turns on/off reporting.
2044 
2045 INPUT PARAMETERS:
2046  State - structure which stores algorithm state
2047  NeedXRep- whether iteration reports are needed or not
2048 
2049 If NeedXRep is True, algorithm will call rep() callback function if it is
2050 provided to MinLBFGSOptimize().
2051 
2052 
2053  -- ALGLIB --
2054  Copyright 02.04.2010 by Bochkanov Sergey
2055 *************************************************************************/
2056 void minlbfgssetxrep(const minlbfgsstate &state, const bool needxrep);
2057 
2058 
2059 /*************************************************************************
2060 This function sets maximum step length
2061 
2062 INPUT PARAMETERS:
2063  State - structure which stores algorithm state
2064  StpMax - maximum step length, >=0. Set StpMax to 0.0 (default), if
2065  you don't want to limit step length.
2066 
2067 Use this subroutine when you optimize target function which contains exp()
2068 or other fast growing functions, and optimization algorithm makes too
2069 large steps which leads to overflow. This function allows us to reject
2070 steps that are too large (and therefore expose us to the possible
2071 overflow) without actually calculating function value at the x+stp*d.
2072 
2073  -- ALGLIB --
2074  Copyright 02.04.2010 by Bochkanov Sergey
2075 *************************************************************************/
2076 void minlbfgssetstpmax(const minlbfgsstate &state, const double stpmax);
2077 
2078 
2079 /*************************************************************************
2080 This function sets scaling coefficients for LBFGS optimizer.
2081 
2082 ALGLIB optimizers use scaling matrices to test stopping conditions (step
2083 size and gradient are scaled before comparison with tolerances). Scale of
2084 the I-th variable is a translation invariant measure of:
2085 a) "how large" the variable is
2086 b) how large the step should be to make significant changes in the function
2087 
2088 Scaling is also used by finite difference variant of the optimizer - step
2089 along I-th axis is equal to DiffStep*S[I].
2090 
2091 In most optimizers (and in the LBFGS too) scaling is NOT a form of
2092 preconditioning. It just affects stopping conditions. You should set
2093 preconditioner by separate call to one of the MinLBFGSSetPrec...()
2094 functions.
2095 
2096 There is special preconditioning mode, however, which uses scaling
2097 coefficients to form diagonal preconditioning matrix. You can turn this
2098 mode on, if you want. But you should understand that scaling is not the
2099 same thing as preconditioning - these are two different, although related
2100 forms of tuning solver.
2101 
2102 INPUT PARAMETERS:
2103  State - structure stores algorithm state
2104  S - array[N], non-zero scaling coefficients
2105  S[i] may be negative, sign doesn't matter.
2106 
2107  -- ALGLIB --
2108  Copyright 14.01.2011 by Bochkanov Sergey
2109 *************************************************************************/
2110 void minlbfgssetscale(const minlbfgsstate &state, const real_1d_array &s);
2111 
2112 
2113 /*************************************************************************
2114 Modification of the preconditioner: default preconditioner (simple
2115 scaling, same for all elements of X) is used.
2116 
2117 INPUT PARAMETERS:
2118  State - structure which stores algorithm state
2119 
2120 NOTE: you can change preconditioner "on the fly", during algorithm
2121 iterations.
2122 
2123  -- ALGLIB --
2124  Copyright 13.10.2010 by Bochkanov Sergey
2125 *************************************************************************/
2127 
2128 
2129 /*************************************************************************
2130 Modification of the preconditioner: Cholesky factorization of approximate
2131 Hessian is used.
2132 
2133 INPUT PARAMETERS:
2134  State - structure which stores algorithm state
2135  P - triangular preconditioner, Cholesky factorization of
2136  the approximate Hessian. array[0..N-1,0..N-1],
2137  (if larger, only leading N elements are used).
2138  IsUpper - whether upper or lower triangle of P is given
2139  (other triangle is not referenced)
2140 
2141 After call to this function preconditioner is changed to P (P is copied
2142 into the internal buffer).
2143 
2144 NOTE: you can change preconditioner "on the fly", during algorithm
2145 iterations.
2146 
2147 NOTE 2: P should be nonsingular. Exception will be thrown otherwise.
2148 
2149  -- ALGLIB --
2150  Copyright 13.10.2010 by Bochkanov Sergey
2151 *************************************************************************/
2152 void minlbfgssetpreccholesky(const minlbfgsstate &state, const real_2d_array &p, const bool isupper);
2153 
2154 
2155 /*************************************************************************
2156 Modification of the preconditioner: diagonal of approximate Hessian is
2157 used.
2158 
2159 INPUT PARAMETERS:
2160  State - structure which stores algorithm state
2161  D - diagonal of the approximate Hessian, array[0..N-1],
2162  (if larger, only leading N elements are used).
2163 
2164 NOTE: you can change preconditioner "on the fly", during algorithm
2165 iterations.
2166 
2167 NOTE 2: D[i] should be positive. Exception will be thrown otherwise.
2168 
2169 NOTE 3: you should pass diagonal of approximate Hessian - NOT ITS INVERSE.
2170 
2171  -- ALGLIB --
2172  Copyright 13.10.2010 by Bochkanov Sergey
2173 *************************************************************************/
2174 void minlbfgssetprecdiag(const minlbfgsstate &state, const real_1d_array &d);
2175 
2176 
2177 /*************************************************************************
2178 Modification of the preconditioner: scale-based diagonal preconditioning.
2179 
2180 This preconditioning mode can be useful when you don't have approximate
2181 diagonal of Hessian, but you know that your variables are badly scaled
2182 (for example, one variable is in [1,10], and another in [1000,100000]),
2183 and most part of the ill-conditioning comes from different scales of vars.
2184 
2185 In this case simple scale-based preconditioner, with H[i] = 1/(s[i]^2),
2186 can greatly improve convergence.
2187 
2188 IMPORTANT: you should set scale of your variables with MinLBFGSSetScale()
2189 call (before or after MinLBFGSSetPrecScale() call). Without knowledge of
2190 the scale of your variables scale-based preconditioner will be just unit
2191 matrix.
2192 
2193 INPUT PARAMETERS:
2194  State - structure which stores algorithm state
2195 
2196  -- ALGLIB --
2197  Copyright 13.10.2010 by Bochkanov Sergey
2198 *************************************************************************/
2200 
2201 
2202 /*************************************************************************
2203 This function provides reverse communication interface
2204 Reverse communication interface is not documented or recommended to use.
2205 See below for functions which provide better documented API
2206 *************************************************************************/
2208 
2209 
2210 /*************************************************************************
2211 This family of functions is used to launch iterations of nonlinear optimizer
2212 
2213 These functions accept following parameters:
2214  state - algorithm state
2215  func - callback which calculates function (or merit function)
2216  value func at given point x
2217  grad - callback which calculates function (or merit function)
2218  value func and gradient grad at given point x
2219  rep - optional callback which is called after each iteration
2220  can be NULL
2221  ptr - optional pointer which is passed to func/grad/hess/jac/rep
2222  can be NULL
2223 
2224 NOTES:
2225 
2226 1. This function has two different implementations: one which uses exact
2227  (analytical) user-supplied gradient, and one which uses function value
2228  only and numerically differentiates function in order to obtain
2229  gradient.
2230 
2231  Depending on the specific function used to create optimizer object
2232  (either MinLBFGSCreate() for analytical gradient or MinLBFGSCreateF()
2233  for numerical differentiation) you should choose appropriate variant of
2234  MinLBFGSOptimize() - one which accepts function AND gradient or one
2235  which accepts function ONLY.
2236 
2237  Be careful to choose variant of MinLBFGSOptimize() which corresponds to
2238  your optimization scheme! Table below lists different combinations of
2239  callback (function/gradient) passed to MinLBFGSOptimize() and specific
2240  function used to create optimizer.
2241 
2242 
2243  | USER PASSED TO MinLBFGSOptimize()
2244  CREATED WITH | function only | function and gradient
2245  ------------------------------------------------------------
2246  MinLBFGSCreateF() | work FAIL
2247  MinLBFGSCreate() | FAIL work
2248 
2249  Here "FAIL" denotes inappropriate combinations of optimizer creation
2250  function and MinLBFGSOptimize() version. Attempts to use such
2251  combination (for example, to create optimizer with MinLBFGSCreateF() and
2252  to pass gradient information to MinLBFGSOptimize()) will lead to exception
2253  being thrown. Either you did not pass gradient when it WAS needed or
2254  you passed gradient when it was NOT needed.
2255 
2256  -- ALGLIB --
2257  Copyright 20.03.2009 by Bochkanov Sergey
2258 
2259 *************************************************************************/
2261  void (*func)(const real_1d_array &x, double &func, void *ptr),
2262  void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
2263  void *ptr = NULL);
2265  void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
2266  void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
2267  void *ptr = NULL);
2268 
2269 
2270 /*************************************************************************
2271 L-BFGS algorithm results
2272 
2273 INPUT PARAMETERS:
2274  State - algorithm state
2275 
2276 OUTPUT PARAMETERS:
2277  X - array[0..N-1], solution
2278  Rep - optimization report:
2279  * Rep.TerminationType completion code:
2280  * -8 internal integrity control detected infinite
2281  or NAN values in function/gradient. Abnormal
2282  termination signalled.
2283  * -7 gradient verification failed.
2284  See MinLBFGSSetGradientCheck() for more information.
2285  * -2 rounding errors prevent further improvement.
2286  X contains best point found.
2287  * -1 incorrect parameters were specified
2288  * 1 relative function improvement is no more than
2289  EpsF.
2290  * 2 relative step is no more than EpsX.
2291  * 4 gradient norm is no more than EpsG
2292  * 5 MaxIts steps was taken
2293  * 7 stopping conditions are too stringent,
2294  further improvement is impossible
2295  * 8 terminated by user who called minlbfgsrequesttermination().
2296  X contains point which was "current accepted" when
2297  termination request was submitted.
2298  * Rep.IterationsCount contains iterations count
2299  * NFEV contains number of function calculations
2300 
2301  -- ALGLIB --
2302  Copyright 02.04.2010 by Bochkanov Sergey
2303 *************************************************************************/
2305 
2306 
2307 /*************************************************************************
2308 L-BFGS algorithm results
2309 
2310 Buffered implementation of MinLBFGSResults which uses pre-allocated buffer
2311 to store X[]. If buffer size is too small, it resizes buffer. It is
2312 intended to be used in the inner cycles of performance critical algorithms
2313 where array reallocation penalty is too large to be ignored.
2314 
2315  -- ALGLIB --
2316  Copyright 20.08.2010 by Bochkanov Sergey
2317 *************************************************************************/
2319 
2320 
2321 /*************************************************************************
2322 This subroutine restarts LBFGS algorithm from new point. All optimization
2323 parameters are left unchanged.
2324 
2325 This function allows to solve multiple optimization problems (which
2326 must have same number of dimensions) without object reallocation penalty.
2327 
2328 INPUT PARAMETERS:
2329  State - structure used to store algorithm state
2330  X - new starting point.
2331 
2332  -- ALGLIB --
2333  Copyright 30.07.2010 by Bochkanov Sergey
2334 *************************************************************************/
2335 void minlbfgsrestartfrom(const minlbfgsstate &state, const real_1d_array &x);
2336 
2337 
2338 /*************************************************************************
2339 This subroutine submits request for termination of running optimizer. It
2340 should be called from user-supplied callback when user decides that it is
2341 time to "smoothly" terminate optimization process. As result, optimizer
2342 stops at point which was "current accepted" when termination request was
2343 submitted and returns error code 8 (successful termination).
2344 
2345 INPUT PARAMETERS:
2346  State - optimizer structure
2347 
2348 NOTE: after request for termination optimizer may perform several
2349  additional calls to user-supplied callbacks. It does NOT guarantee
2350  to stop immediately - it just guarantees that these additional calls
2351  will be discarded later.
2352 
2353 NOTE: calling this function on optimizer which is NOT running will have no
2354  effect.
2355 
2356 NOTE: multiple calls to this function are possible. First call is counted,
2357  subsequent calls are silently ignored.
2358 
2359  -- ALGLIB --
2360  Copyright 08.10.2014 by Bochkanov Sergey
2361 *************************************************************************/
2363 
2364 
2365 /*************************************************************************
2366 This subroutine turns on verification of the user-supplied analytic
2367 gradient:
2368 * user calls this subroutine before optimization begins
2369 * MinLBFGSOptimize() is called
2370 * prior to actual optimization, for each component of parameters being
2371  optimized X[i] algorithm performs following steps:
2372  * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i],
2373  where X[i] is i-th component of the initial point and S[i] is a scale
2374  of i-th parameter
2375  * if needed, steps are bounded with respect to constraints on X[]
2376  * F(X) is evaluated at these trial points
2377  * we perform one more evaluation in the middle point of the interval
2378  * we build cubic model using function values and derivatives at trial
2379  points and we compare its prediction with actual value in the middle
2380  point
2381  * in case difference between prediction and actual value is higher than
2382  some predetermined threshold, algorithm stops with completion code -7;
2383  Rep.VarIdx is set to index of the parameter with incorrect derivative.
2384 * after verification is over, algorithm proceeds to the actual optimization.
2385 
2386 NOTE 1: verification needs N (parameters count) gradient evaluations. It
2387  is very costly and you should use it only for low dimensional
2388  problems, when you want to be sure that you've correctly
2389  calculated analytic derivatives. You should not use it in the
2390  production code (unless you want to check derivatives provided by
2391  some third party).
2392 
2393 NOTE 2: you should carefully choose TestStep. Value which is too large
2394  (so large that function behaviour is significantly non-cubic) will
2395  lead to false alarms. You may use different step for different
2396  parameters by means of setting scale with MinLBFGSSetScale().
2397 
2398 NOTE 3: this function may lead to false positives. In case it reports that
2399  I-th derivative was calculated incorrectly, you may decrease test
2400  step and try one more time - maybe your function changes too
2401  sharply and your step is too large for such rapidly changing
2402  function.
2403 
2404 INPUT PARAMETERS:
2405  State - structure used to store algorithm state
2406  TestStep - verification step:
2407  * TestStep=0 turns verification off
2408  * TestStep>0 activates verification
2409 
2410  -- ALGLIB --
2411  Copyright 24.05.2012 by Bochkanov Sergey
2412 *************************************************************************/
2413 void minlbfgssetgradientcheck(const minlbfgsstate &state, const double teststep);
2414 
2415 
2416 
2417 
2418 
2419 /*************************************************************************
2420  NONLINEAR CONJUGATE GRADIENT METHOD
2421 
2422 DESCRIPTION:
2423 The subroutine minimizes function F(x) of N arguments by using one of the
2424 nonlinear conjugate gradient methods.
2425 
2426 These CG methods are globally convergent (even on non-convex functions) as
2427 long as grad(f) is Lipschitz continuous in some neighborhood of the
2428 L = { x : f(x)<=f(x0) }.
2429 
2430 
2431 REQUIREMENTS:
2432 Algorithm will request following information during its operation:
2433 * function value F and its gradient G (simultaneously) at given point X
2434 
2435 
2436 USAGE:
2437 1. User initializes algorithm state with MinCGCreate() call
2438 2. User tunes solver parameters with MinCGSetCond(), MinCGSetStpMax() and
2439  other functions
2440 3. User calls MinCGOptimize() function which takes algorithm state and
2441  pointer (delegate, etc.) to callback function which calculates F/G.
2442 4. User calls MinCGResults() to get solution
2443 5. Optionally, user may call MinCGRestartFrom() to solve another problem
2444  with same N but another starting point and/or another function.
2445  MinCGRestartFrom() allows to reuse already initialized structure.
2446 
2447 
2448 INPUT PARAMETERS:
2449  N - problem dimension, N>0:
2450  * if given, only leading N elements of X are used
2451  * if not given, automatically determined from size of X
2452  X - starting point, array[0..N-1].
2453 
2454 OUTPUT PARAMETERS:
2455  State - structure which stores algorithm state
2456 
2457  -- ALGLIB --
2458  Copyright 25.03.2010 by Bochkanov Sergey
2459 *************************************************************************/
2460 void mincgcreate(const ae_int_t n, const real_1d_array &x, mincgstate &state);
2461 void mincgcreate(const real_1d_array &x, mincgstate &state);
2462 
2463 
2464 /*************************************************************************
2465 The subroutine is finite difference variant of MinCGCreate(). It uses
2466 finite differences in order to differentiate target function.
2467 
2468 Description below contains information which is specific to this function
2469 only. We recommend to read comments on MinCGCreate() in order to get more
2470 information about creation of CG optimizer.
2471 
2472 INPUT PARAMETERS:
2473  N - problem dimension, N>0:
2474  * if given, only leading N elements of X are used
2475  * if not given, automatically determined from size of X
2476  X - starting point, array[0..N-1].
2477  DiffStep- differentiation step, >0
2478 
2479 OUTPUT PARAMETERS:
2480  State - structure which stores algorithm state
2481 
2482 NOTES:
2483 1. algorithm uses 4-point central formula for differentiation.
2484 2. differentiation step along I-th axis is equal to DiffStep*S[I] where
2485  S[] is scaling vector which can be set by MinCGSetScale() call.
2486 3. we recommend you to use moderate values of differentiation step. Too
2487  large step will result in too large truncation errors, while too small
2488  step will result in too large numerical errors. 1.0E-6 can be good
2489  value to start with.
2490 4. Numerical differentiation is very inefficient - one gradient
2491  calculation needs 4*N function evaluations. This function will work for
2492  any N - either small (1...10), moderate (10...100) or large (100...).
2493  However, performance penalty will be too severe for any N's except for
2494  small ones.
2495  We should also say that code which relies on numerical differentiation
2496  is less robust and precise. CG needs exact gradient values.
2497  Imprecise gradient may slow down convergence, especially on highly
2498  nonlinear problems.
2499  Thus we recommend to use this function for fast prototyping on small-
2500  dimensional problems only, and to implement analytical gradient as soon
2501  as possible.
2502 
2503  -- ALGLIB --
2504  Copyright 16.05.2011 by Bochkanov Sergey
2505 *************************************************************************/
2506 void mincgcreatef(const ae_int_t n, const real_1d_array &x, const double diffstep, mincgstate &state);
2507 void mincgcreatef(const real_1d_array &x, const double diffstep, mincgstate &state);
2508 
2509 
2510 /*************************************************************************
2511 This function sets stopping conditions for CG optimization algorithm.
2512 
2513 INPUT PARAMETERS:
2514  State - structure which stores algorithm state
2515  EpsG - >=0
2516  The subroutine finishes its work if the condition
2517  |v|<EpsG is satisfied, where:
 2518  * |.| means Euclidean norm
2519  * v - scaled gradient vector, v[i]=g[i]*s[i]
2520  * g - gradient
2521  * s - scaling coefficients set by MinCGSetScale()
2522  EpsF - >=0
2523  The subroutine finishes its work if on k+1-th iteration
2524  the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
2525  is satisfied.
2526  EpsX - >=0
2527  The subroutine finishes its work if on k+1-th iteration
2528  the condition |v|<=EpsX is fulfilled, where:
 2529  * |.| means Euclidean norm
2530  * v - scaled step vector, v[i]=dx[i]/s[i]
 2531  * dx - step vector, dx=X(k+1)-X(k)
2532  * s - scaling coefficients set by MinCGSetScale()
2533  MaxIts - maximum number of iterations. If MaxIts=0, the number of
2534  iterations is unlimited.
2535 
2536 Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to
2537 automatic stopping criterion selection (small EpsX).
2538 
2539  -- ALGLIB --
2540  Copyright 02.04.2010 by Bochkanov Sergey
2541 *************************************************************************/
2542 void mincgsetcond(const mincgstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits);
2543 
2544 
2545 /*************************************************************************
2546 This function sets scaling coefficients for CG optimizer.
2547 
2548 ALGLIB optimizers use scaling matrices to test stopping conditions (step
2549 size and gradient are scaled before comparison with tolerances). Scale of
2550 the I-th variable is a translation invariant measure of:
2551 a) "how large" the variable is
2552 b) how large the step should be to make significant changes in the function
2553 
2554 Scaling is also used by finite difference variant of CG optimizer - step
2555 along I-th axis is equal to DiffStep*S[I].
2556 
2557 In most optimizers (and in the CG too) scaling is NOT a form of
2558 preconditioning. It just affects stopping conditions. You should set
2559 preconditioner by separate call to one of the MinCGSetPrec...() functions.
2560 
2561 There is special preconditioning mode, however, which uses scaling
2562 coefficients to form diagonal preconditioning matrix. You can turn this
2563 mode on, if you want. But you should understand that scaling is not the
2564 same thing as preconditioning - these are two different, although related
2565 forms of tuning solver.
2566 
2567 INPUT PARAMETERS:
2568  State - structure stores algorithm state
2569  S - array[N], non-zero scaling coefficients
2570  S[i] may be negative, sign doesn't matter.
2571 
2572  -- ALGLIB --
2573  Copyright 14.01.2011 by Bochkanov Sergey
2574 *************************************************************************/
2575 void mincgsetscale(const mincgstate &state, const real_1d_array &s);
2576 
2577 
2578 /*************************************************************************
2579 This function turns on/off reporting.
2580 
2581 INPUT PARAMETERS:
2582  State - structure which stores algorithm state
2583  NeedXRep- whether iteration reports are needed or not
2584 
2585 If NeedXRep is True, algorithm will call rep() callback function if it is
2586 provided to MinCGOptimize().
2587 
2588  -- ALGLIB --
2589  Copyright 02.04.2010 by Bochkanov Sergey
2590 *************************************************************************/
2591 void mincgsetxrep(const mincgstate &state, const bool needxrep);
2592 
2593 
2594 /*************************************************************************
2595 This function sets CG algorithm.
2596 
2597 INPUT PARAMETERS:
2598  State - structure which stores algorithm state
2599  CGType - algorithm type:
2600  * -1 automatic selection of the best algorithm
2601  * 0 DY (Dai and Yuan) algorithm
2602  * 1 Hybrid DY-HS algorithm
2603 
2604  -- ALGLIB --
2605  Copyright 02.04.2010 by Bochkanov Sergey
2606 *************************************************************************/
2607 void mincgsetcgtype(const mincgstate &state, const ae_int_t cgtype);
2608 
2609 
2610 /*************************************************************************
2611 This function sets maximum step length
2612 
2613 INPUT PARAMETERS:
2614  State - structure which stores algorithm state
2615  StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't
2616  want to limit step length.
2617 
2618 Use this subroutine when you optimize target function which contains exp()
2619 or other fast growing functions, and optimization algorithm makes too
2620 large steps which leads to overflow. This function allows us to reject
2621 steps that are too large (and therefore expose us to the possible
2622 overflow) without actually calculating function value at the x+stp*d.
2623 
2624  -- ALGLIB --
2625  Copyright 02.04.2010 by Bochkanov Sergey
2626 *************************************************************************/
2627 void mincgsetstpmax(const mincgstate &state, const double stpmax);
2628 
2629 
2630 /*************************************************************************
2631 This function allows to suggest initial step length to the CG algorithm.
2632 
2633 Suggested step length is used as starting point for the line search. It
2634 can be useful when you have badly scaled problem, i.e. when ||grad||
2635 (which is used as initial estimate for the first step) is many orders of
2636 magnitude different from the desired step.
2637 
2638 Line search may fail on such problems without good estimate of initial
2639 step length. Imagine, for example, problem with ||grad||=10^50 and desired
2640 step equal to 0.1 Line search function will use 10^50 as initial step,
2641 then it will decrease step length by 2 (up to 20 attempts) and will get
2642 10^44, which is still too large.
2643 
 2644 This function allows us to tell that the line search should be started from
2645 some moderate step length, like 1.0, so algorithm will be able to detect
 2646 desired step length in several searches.
2647 
2648 Default behavior (when no step is suggested) is to use preconditioner, if
2649 it is available, to generate initial estimate of step length.
2650 
2651 This function influences only first iteration of algorithm. It should be
2652 called between MinCGCreate/MinCGRestartFrom() call and MinCGOptimize call.
2653 Suggested step is ignored if you have preconditioner.
2654 
2655 INPUT PARAMETERS:
2656  State - structure used to store algorithm state.
2657  Stp - initial estimate of the step length.
2658  Can be zero (no estimate).
2659 
2660  -- ALGLIB --
2661  Copyright 30.07.2010 by Bochkanov Sergey
2662 *************************************************************************/
2663 void mincgsuggeststep(const mincgstate &state, const double stp);
2664 
2665 
2666 /*************************************************************************
2667 Modification of the preconditioner: preconditioning is turned off.
2668 
2669 INPUT PARAMETERS:
2670  State - structure which stores algorithm state
2671 
2672 NOTE: you can change preconditioner "on the fly", during algorithm
2673 iterations.
2674 
2675  -- ALGLIB --
2676  Copyright 13.10.2010 by Bochkanov Sergey
2677 *************************************************************************/
2678 void mincgsetprecdefault(const mincgstate &state);
2679 
2680 
2681 /*************************************************************************
2682 Modification of the preconditioner: diagonal of approximate Hessian is
2683 used.
2684 
2685 INPUT PARAMETERS:
2686  State - structure which stores algorithm state
2687  D - diagonal of the approximate Hessian, array[0..N-1],
2688  (if larger, only leading N elements are used).
2689 
2690 NOTE: you can change preconditioner "on the fly", during algorithm
2691 iterations.
2692 
2693 NOTE 2: D[i] should be positive. Exception will be thrown otherwise.
2694 
2695 NOTE 3: you should pass diagonal of approximate Hessian - NOT ITS INVERSE.
2696 
2697  -- ALGLIB --
2698  Copyright 13.10.2010 by Bochkanov Sergey
2699 *************************************************************************/
2700 void mincgsetprecdiag(const mincgstate &state, const real_1d_array &d);
2701 
2702 
2703 /*************************************************************************
2704 Modification of the preconditioner: scale-based diagonal preconditioning.
2705 
2706 This preconditioning mode can be useful when you don't have approximate
2707 diagonal of Hessian, but you know that your variables are badly scaled
2708 (for example, one variable is in [1,10], and another in [1000,100000]),
2709 and most part of the ill-conditioning comes from different scales of vars.
2710 
2711 In this case simple scale-based preconditioner, with H[i] = 1/(s[i]^2),
2712 can greatly improve convergence.
2713 
 2714 IMPORTANT: you should set scale of your variables with MinCGSetScale() call
2715 (before or after MinCGSetPrecScale() call). Without knowledge of the scale
2716 of your variables scale-based preconditioner will be just unit matrix.
2717 
2718 INPUT PARAMETERS:
2719  State - structure which stores algorithm state
2720 
2721 NOTE: you can change preconditioner "on the fly", during algorithm
2722 iterations.
2723 
2724  -- ALGLIB --
2725  Copyright 13.10.2010 by Bochkanov Sergey
2726 *************************************************************************/
2727 void mincgsetprecscale(const mincgstate &state);
2728 
2729 
2730 /*************************************************************************
2731 This function provides reverse communication interface
2732 Reverse communication interface is not documented or recommended to use.
2733 See below for functions which provide better documented API
2734 *************************************************************************/
2735 bool mincgiteration(const mincgstate &state);
2736 
2737 
2738 /*************************************************************************
 2739 This family of functions is used to launch iterations of nonlinear optimizer
2740 
2741 These functions accept following parameters:
2742  state - algorithm state
2743  func - callback which calculates function (or merit function)
2744  value func at given point x
2745  grad - callback which calculates function (or merit function)
2746  value func and gradient grad at given point x
2747  rep - optional callback which is called after each iteration
2748  can be NULL
2749  ptr - optional pointer which is passed to func/grad/hess/jac/rep
2750  can be NULL
2751 
2752 NOTES:
2753 
2754 1. This function has two different implementations: one which uses exact
2755  (analytical) user-supplied gradient, and one which uses function value
2756  only and numerically differentiates function in order to obtain
2757  gradient.
2758 
2759  Depending on the specific function used to create optimizer object
2760  (either MinCGCreate() for analytical gradient or MinCGCreateF() for
2761  numerical differentiation) you should choose appropriate variant of
2762  MinCGOptimize() - one which accepts function AND gradient or one which
2763  accepts function ONLY.
2764 
2765  Be careful to choose variant of MinCGOptimize() which corresponds to
2766  your optimization scheme! Table below lists different combinations of
2767  callback (function/gradient) passed to MinCGOptimize() and specific
2768  function used to create optimizer.
2769 
2770 
2771  | USER PASSED TO MinCGOptimize()
2772  CREATED WITH | function only | function and gradient
2773  ------------------------------------------------------------
2774  MinCGCreateF() | work FAIL
2775  MinCGCreate() | FAIL work
2776 
2777  Here "FAIL" denotes inappropriate combinations of optimizer creation
 2778  function and MinCGOptimize() version. Attempts to use such combination
2779  (for example, to create optimizer with MinCGCreateF() and to pass
2780  gradient information to MinCGOptimize()) will lead to exception being
2781  thrown. Either you did not pass gradient when it WAS needed or you
2782  passed gradient when it was NOT needed.
2783 
2784  -- ALGLIB --
2785  Copyright 20.04.2009 by Bochkanov Sergey
2786 
2787 *************************************************************************/
 2788 void mincgoptimize(mincgstate &state,
 2789  void (*func)(const real_1d_array &x, double &func, void *ptr),
2790  void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
2791  void *ptr = NULL);
 2792 void mincgoptimize(mincgstate &state,
 2793  void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
2794  void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
2795  void *ptr = NULL);
2796 
2797 
2798 /*************************************************************************
2799 Conjugate gradient results
2800 
2801 INPUT PARAMETERS:
2802  State - algorithm state
2803 
2804 OUTPUT PARAMETERS:
2805  X - array[0..N-1], solution
2806  Rep - optimization report:
 2807  * Rep.TerminationType completion code:
2808  * -8 internal integrity control detected infinite
2809  or NAN values in function/gradient. Abnormal
2810  termination signalled.
2811  * -7 gradient verification failed.
2812  See MinCGSetGradientCheck() for more information.
2813  * 1 relative function improvement is no more than
2814  EpsF.
2815  * 2 relative step is no more than EpsX.
2816  * 4 gradient norm is no more than EpsG
2817  * 5 MaxIts steps was taken
2818  * 7 stopping conditions are too stringent,
2819  further improvement is impossible,
2820  we return best X found so far
2821  * 8 terminated by user
2822  * Rep.IterationsCount contains iterations count
 2823  * NFEV contains number of function calculations
2824 
2825  -- ALGLIB --
2826  Copyright 20.04.2009 by Bochkanov Sergey
2827 *************************************************************************/
2828 void mincgresults(const mincgstate &state, real_1d_array &x, mincgreport &rep);
2829 
2830 
2831 /*************************************************************************
2832 Conjugate gradient results
2833 
2834 Buffered implementation of MinCGResults(), which uses pre-allocated buffer
2835 to store X[]. If buffer size is too small, it resizes buffer. It is
2836 intended to be used in the inner cycles of performance critical algorithms
2837 where array reallocation penalty is too large to be ignored.
2838 
2839  -- ALGLIB --
2840  Copyright 20.04.2009 by Bochkanov Sergey
2841 *************************************************************************/
 2842 void mincgresultsbuf(const mincgstate &state, real_1d_array &x, mincgreport &rep);
 2843 
2844 
2845 /*************************************************************************
2846 This subroutine restarts CG algorithm from new point. All optimization
2847 parameters are left unchanged.
2848 
2849 This function allows to solve multiple optimization problems (which
2850 must have same number of dimensions) without object reallocation penalty.
2851 
2852 INPUT PARAMETERS:
2853  State - structure used to store algorithm state.
2854  X - new starting point.
2855 
2856  -- ALGLIB --
2857  Copyright 30.07.2010 by Bochkanov Sergey
2858 *************************************************************************/
2859 void mincgrestartfrom(const mincgstate &state, const real_1d_array &x);
2860 
2861 
2862 /*************************************************************************
2863 This subroutine submits request for termination of running optimizer. It
2864 should be called from user-supplied callback when user decides that it is
2865 time to "smoothly" terminate optimization process. As result, optimizer
2866 stops at point which was "current accepted" when termination request was
2867 submitted and returns error code 8 (successful termination).
2868 
2869 INPUT PARAMETERS:
2870  State - optimizer structure
2871 
2872 NOTE: after request for termination optimizer may perform several
2873  additional calls to user-supplied callbacks. It does NOT guarantee
2874  to stop immediately - it just guarantees that these additional calls
2875  will be discarded later.
2876 
2877 NOTE: calling this function on optimizer which is NOT running will have no
2878  effect.
2879 
2880 NOTE: multiple calls to this function are possible. First call is counted,
2881  subsequent calls are silently ignored.
2882 
2883  -- ALGLIB --
2884  Copyright 08.10.2014 by Bochkanov Sergey
2885 *************************************************************************/
 2886 void mincgrequesttermination(const mincgstate &state);
 2887 
2888 
2889 /*************************************************************************
2890 
2891 This subroutine turns on verification of the user-supplied analytic
2892 gradient:
2893 * user calls this subroutine before optimization begins
2894 * MinCGOptimize() is called
2895 * prior to actual optimization, for each component of parameters being
2896  optimized X[i] algorithm performs following steps:
2897  * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i],
2898  where X[i] is i-th component of the initial point and S[i] is a scale
2899  of i-th parameter
2900  * F(X) is evaluated at these trial points
2901  * we perform one more evaluation in the middle point of the interval
2902  * we build cubic model using function values and derivatives at trial
2903  points and we compare its prediction with actual value in the middle
2904  point
2905  * in case difference between prediction and actual value is higher than
2906  some predetermined threshold, algorithm stops with completion code -7;
2907  Rep.VarIdx is set to index of the parameter with incorrect derivative.
2908 * after verification is over, algorithm proceeds to the actual optimization.
2909 
2910 NOTE 1: verification needs N (parameters count) gradient evaluations. It
2911  is very costly and you should use it only for low dimensional
2912  problems, when you want to be sure that you've correctly
2913  calculated analytic derivatives. You should not use it in the
2914  production code (unless you want to check derivatives provided by
2915  some third party).
2916 
2917 NOTE 2: you should carefully choose TestStep. Value which is too large
2918  (so large that function behaviour is significantly non-cubic) will
2919  lead to false alarms. You may use different step for different
2920  parameters by means of setting scale with MinCGSetScale().
2921 
2922 NOTE 3: this function may lead to false positives. In case it reports that
2923  I-th derivative was calculated incorrectly, you may decrease test
2924  step and try one more time - maybe your function changes too
 2925  sharply and your step is too large for such rapidly changing
2926  function.
2927 
2928 INPUT PARAMETERS:
2929  State - structure used to store algorithm state
2930  TestStep - verification step:
2931  * TestStep=0 turns verification off
2932  * TestStep>0 activates verification
2933 
2934  -- ALGLIB --
2935  Copyright 31.05.2012 by Bochkanov Sergey
2936 *************************************************************************/
2937 void mincgsetgradientcheck(const mincgstate &state, const double teststep);
2938 
2939 /*************************************************************************
2940  BOUND CONSTRAINED OPTIMIZATION
2941  WITH ADDITIONAL LINEAR EQUALITY AND INEQUALITY CONSTRAINTS
2942 
2943 DESCRIPTION:
2944 The subroutine minimizes function F(x) of N arguments subject to any
2945 combination of:
2946 * bound constraints
2947 * linear inequality constraints
2948 * linear equality constraints
2949 
2950 REQUIREMENTS:
2951 * user must provide function value and gradient
2952 * starting point X0 must be feasible or
2953  not too far away from the feasible set
2954 * grad(f) must be Lipschitz continuous on a level set:
2955  L = { x : f(x)<=f(x0) }
2956 * function must be defined everywhere on the feasible set F
2957 
2958 USAGE:
2959 
 2960 Constrained optimization is far more complex than the unconstrained one.
2961 Here we give very brief outline of the BLEIC optimizer. We strongly recommend
2962 you to read examples in the ALGLIB Reference Manual and to read ALGLIB User Guide
2963 on optimization, which is available at http://www.alglib.net/optimization/
2964 
2965 1. User initializes algorithm state with MinBLEICCreate() call
2966 
2967 2. USer adds boundary and/or linear constraints by calling
2968  MinBLEICSetBC() and MinBLEICSetLC() functions.
2969 
2970 3. User sets stopping conditions with MinBLEICSetCond().
2971 
2972 4. User calls MinBLEICOptimize() function which takes algorithm state and
2973  pointer (delegate, etc.) to callback function which calculates F/G.
2974 
2975 5. User calls MinBLEICResults() to get solution
2976 
2977 6. Optionally user may call MinBLEICRestartFrom() to solve another problem
2978  with same N but another starting point.
2979  MinBLEICRestartFrom() allows to reuse already initialized structure.
2980 
2981 NOTE: if you have box-only constraints (no general linear constraints),
2982  then MinBC optimizer can be better option. It uses special, faster
2983  constraint activation method, which performs better on problems with
2984  multiple constraints active at the solution.
2985 
2986  On small-scale problems performance of MinBC is similar to that of
2987  MinBLEIC, but on large-scale ones (hundreds and thousands of active
2988  constraints) it can be several times faster than MinBLEIC.
2989 
2990 INPUT PARAMETERS:
2991  N - problem dimension, N>0:
2992  * if given, only leading N elements of X are used
 2993  * if not given, automatically determined from size of X
2994  X - starting point, array[N]:
2995  * it is better to set X to a feasible point
2996  * but X can be infeasible, in which case algorithm will try
2997  to find feasible point first, using X as initial
2998  approximation.
2999 
3000 OUTPUT PARAMETERS:
3001  State - structure stores algorithm state
3002 
3003  -- ALGLIB --
3004  Copyright 28.11.2010 by Bochkanov Sergey
3005 *************************************************************************/
3006 void minbleiccreate(const ae_int_t n, const real_1d_array &x, minbleicstate &state);
 3007 void minbleiccreate(const real_1d_array &x, minbleicstate &state);
 3008 
3009 
3010 /*************************************************************************
3011 The subroutine is finite difference variant of MinBLEICCreate(). It uses
3012 finite differences in order to differentiate target function.
3013 
3014 Description below contains information which is specific to this function
3015 only. We recommend to read comments on MinBLEICCreate() in order to get
3016 more information about creation of BLEIC optimizer.
3017 
3018 INPUT PARAMETERS:
3019  N - problem dimension, N>0:
3020  * if given, only leading N elements of X are used
3021  * if not given, automatically determined from size of X
3022  X - starting point, array[0..N-1].
3023  DiffStep- differentiation step, >0
3024 
3025 OUTPUT PARAMETERS:
3026  State - structure which stores algorithm state
3027 
3028 NOTES:
3029 1. algorithm uses 4-point central formula for differentiation.
3030 2. differentiation step along I-th axis is equal to DiffStep*S[I] where
3031  S[] is scaling vector which can be set by MinBLEICSetScale() call.
3032 3. we recommend you to use moderate values of differentiation step. Too
3033  large step will result in too large truncation errors, while too small
3034  step will result in too large numerical errors. 1.0E-6 can be good
3035  value to start with.
3036 4. Numerical differentiation is very inefficient - one gradient
3037  calculation needs 4*N function evaluations. This function will work for
3038  any N - either small (1...10), moderate (10...100) or large (100...).
3039  However, performance penalty will be too severe for any N's except for
3040  small ones.
3041  We should also say that code which relies on numerical differentiation
3042  is less robust and precise. CG needs exact gradient values. Imprecise
3043  gradient may slow down convergence, especially on highly nonlinear
3044  problems.
3045  Thus we recommend to use this function for fast prototyping on small-
3046  dimensional problems only, and to implement analytical gradient as soon
3047  as possible.
3048 
3049  -- ALGLIB --
3050  Copyright 16.05.2011 by Bochkanov Sergey
3051 *************************************************************************/
3052 void minbleiccreatef(const ae_int_t n, const real_1d_array &x, const double diffstep, minbleicstate &state);
3053 void minbleiccreatef(const real_1d_array &x, const double diffstep, minbleicstate &state);
3054 
3055 
3056 /*************************************************************************
3057 This function sets boundary constraints for BLEIC optimizer.
3058 
3059 Boundary constraints are inactive by default (after initial creation).
3060 They are preserved after algorithm restart with MinBLEICRestartFrom().
3061 
3062 NOTE: if you have box-only constraints (no general linear constraints),
3063  then MinBC optimizer can be better option. It uses special, faster
3064  constraint activation method, which performs better on problems with
3065  multiple constraints active at the solution.
3066 
3067  On small-scale problems performance of MinBC is similar to that of
3068  MinBLEIC, but on large-scale ones (hundreds and thousands of active
3069  constraints) it can be several times faster than MinBLEIC.
3070 
3071 INPUT PARAMETERS:
3072  State - structure stores algorithm state
3073  BndL - lower bounds, array[N].
3074  If some (all) variables are unbounded, you may specify
3075  very small number or -INF.
3076  BndU - upper bounds, array[N].
3077  If some (all) variables are unbounded, you may specify
3078  very large number or +INF.
3079 
3080 NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th
3081 variable will be "frozen" at X[i]=BndL[i]=BndU[i].
3082 
3083 NOTE 2: this solver has following useful properties:
3084 * bound constraints are always satisfied exactly
3085 * function is evaluated only INSIDE area specified by bound constraints,
3086  even when numerical differentiation is used (algorithm adjusts nodes
3087  according to boundary constraints)
3088 
3089  -- ALGLIB --
3090  Copyright 28.11.2010 by Bochkanov Sergey
3091 *************************************************************************/
3092 void minbleicsetbc(const minbleicstate &state, const real_1d_array &bndl, const real_1d_array &bndu);
3093 
3094 
3095 /*************************************************************************
3096 This function sets linear constraints for BLEIC optimizer.
3097 
3098 Linear constraints are inactive by default (after initial creation).
3099 They are preserved after algorithm restart with MinBLEICRestartFrom().
3100 
3101 INPUT PARAMETERS:
3102  State - structure previously allocated with MinBLEICCreate call.
3103  C - linear constraints, array[K,N+1].
3104  Each row of C represents one constraint, either equality
3105  or inequality (see below):
3106  * first N elements correspond to coefficients,
3107  * last element corresponds to the right part.
3108  All elements of C (including right part) must be finite.
3109  CT - type of constraints, array[K]:
3110  * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1]
3111  * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1]
3112  * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1]
3113  K - number of equality/inequality constraints, K>=0:
3114  * if given, only leading K elements of C/CT are used
3115  * if not given, automatically determined from sizes of C/CT
3116 
3117 NOTE 1: linear (non-bound) constraints are satisfied only approximately:
3118 * there always exists some minor violation (about Epsilon in magnitude)
3119  due to rounding errors
3120 * numerical differentiation, if used, may lead to function evaluations
3121  outside of the feasible area, because algorithm does NOT change
3122  numerical differentiation formula according to linear constraints.
3123 If you want constraints to be satisfied exactly, try to reformulate your
3124 problem in such manner that all constraints will become boundary ones
3125 (this kind of constraints is always satisfied exactly, both in the final
3126 solution and in all intermediate points).
3127 
3128  -- ALGLIB --
3129  Copyright 28.11.2010 by Bochkanov Sergey
3130 *************************************************************************/
3131 void minbleicsetlc(const minbleicstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k);
3132 void minbleicsetlc(const minbleicstate &state, const real_2d_array &c, const integer_1d_array &ct);
3133 
3134 
3135 /*************************************************************************
3136 This function sets stopping conditions for the optimizer.
3137 
3138 INPUT PARAMETERS:
3139  State - structure which stores algorithm state
3140  EpsG - >=0
3141  The subroutine finishes its work if the condition
3142  |v|<EpsG is satisfied, where:
 3143  * |.| means Euclidean norm
3144  * v - scaled gradient vector, v[i]=g[i]*s[i]
3145  * g - gradient
3146  * s - scaling coefficients set by MinBLEICSetScale()
3147  EpsF - >=0
3148  The subroutine finishes its work if on k+1-th iteration
3149  the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
3150  is satisfied.
3151  EpsX - >=0
3152  The subroutine finishes its work if on k+1-th iteration
3153  the condition |v|<=EpsX is fulfilled, where:
 3154  * |.| means Euclidean norm
3155  * v - scaled step vector, v[i]=dx[i]/s[i]
3156  * dx - step vector, dx=X(k+1)-X(k)
3157  * s - scaling coefficients set by MinBLEICSetScale()
3158  MaxIts - maximum number of iterations. If MaxIts=0, the number of
3159  iterations is unlimited.
3160 
3161 Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead
3162 to automatic stopping criterion selection.
3163 
3164 NOTE: when SetCond() called with non-zero MaxIts, BLEIC solver may perform
3165  slightly more than MaxIts iterations. I.e., MaxIts sets non-strict
3166  limit on iterations count.
3167 
3168  -- ALGLIB --
3169  Copyright 28.11.2010 by Bochkanov Sergey
3170 *************************************************************************/
3171 void minbleicsetcond(const minbleicstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits);
3172 
3173 
3174 /*************************************************************************
3175 This function sets scaling coefficients for BLEIC optimizer.
3176 
3177 ALGLIB optimizers use scaling matrices to test stopping conditions (step
3178 size and gradient are scaled before comparison with tolerances). Scale of
3179 the I-th variable is a translation invariant measure of:
3180 a) "how large" the variable is
3181 b) how large the step should be to make significant changes in the function
3182 
3183 Scaling is also used by finite difference variant of the optimizer - step
3184 along I-th axis is equal to DiffStep*S[I].
3185 
3186 In most optimizers (and in the BLEIC too) scaling is NOT a form of
3187 preconditioning. It just affects stopping conditions. You should set
3188 preconditioner by separate call to one of the MinBLEICSetPrec...()
3189 functions.
3190 
3191 There is a special preconditioning mode, however, which uses scaling
3192 coefficients to form diagonal preconditioning matrix. You can turn this
3193 mode on, if you want. But you should understand that scaling is not the
3194 same thing as preconditioning - these are two different, although related
3195 forms of tuning solver.
3196 
3197 INPUT PARAMETERS:
3198  State - structure stores algorithm state
3199  S - array[N], non-zero scaling coefficients
3200  S[i] may be negative, sign doesn't matter.
3201 
3202  -- ALGLIB --
3203  Copyright 14.01.2011 by Bochkanov Sergey
3204 *************************************************************************/
3205 void minbleicsetscale(const minbleicstate &state, const real_1d_array &s);
3206 
3207 
3208 /*************************************************************************
3209 Modification of the preconditioner: preconditioning is turned off.
3210 
3211 INPUT PARAMETERS:
3212  State - structure which stores algorithm state
3213 
3214  -- ALGLIB --
3215  Copyright 13.10.2010 by Bochkanov Sergey
3216 *************************************************************************/
3218 
3219 
3220 /*************************************************************************
3221 Modification of the preconditioner: diagonal of approximate Hessian is
3222 used.
3223 
3224 INPUT PARAMETERS:
3225  State - structure which stores algorithm state
3226  D - diagonal of the approximate Hessian, array[0..N-1],
3227  (if larger, only leading N elements are used).
3228 
3229 NOTE 1: D[i] should be positive. Exception will be thrown otherwise.
3230 
3231 NOTE 2: you should pass diagonal of approximate Hessian - NOT ITS INVERSE.
3232 
3233  -- ALGLIB --
3234  Copyright 13.10.2010 by Bochkanov Sergey
3235 *************************************************************************/
3236 void minbleicsetprecdiag(const minbleicstate &state, const real_1d_array &d);
3237 
3238 
3239 /*************************************************************************
3240 Modification of the preconditioner: scale-based diagonal preconditioning.
3241 
3242 This preconditioning mode can be useful when you don't have approximate
3243 diagonal of Hessian, but you know that your variables are badly scaled
3244 (for example, one variable is in [1,10], and another in [1000,100000]),
3245 and most part of the ill-conditioning comes from different scales of vars.
3246 
3247 In this case simple scale-based preconditioner, with H[i] = 1/(s[i]^2),
3248 can greatly improve convergence.
3249 
3250 IMPORTANT: you should set scale of your variables with MinBLEICSetScale()
3251 call (before or after MinBLEICSetPrecScale() call). Without knowledge of
3252 the scale of your variables scale-based preconditioner will be just unit
3253 matrix.
3254 
3255 INPUT PARAMETERS:
3256  State - structure which stores algorithm state
3257 
3258  -- ALGLIB --
3259  Copyright 13.10.2010 by Bochkanov Sergey
3260 *************************************************************************/
3262 
3263 
3264 /*************************************************************************
3265 This function turns on/off reporting.
3266 
3267 INPUT PARAMETERS:
3268  State - structure which stores algorithm state
3269  NeedXRep- whether iteration reports are needed or not
3270 
3271 If NeedXRep is True, algorithm will call rep() callback function if it is
3272 provided to MinBLEICOptimize().
3273 
3274  -- ALGLIB --
3275  Copyright 28.11.2010 by Bochkanov Sergey
3276 *************************************************************************/
3277 void minbleicsetxrep(const minbleicstate &state, const bool needxrep);
3278 
3279 
3280 /*************************************************************************
3281 This function sets maximum step length
3282 
3283 IMPORTANT: this feature is hard to combine with preconditioning. You can't
3284 set upper limit on step length, when you solve optimization problem with
3285 linear (non-boundary) constraints AND preconditioner turned on.
3286 
3287 When non-boundary constraints are present, you have to either a) use
3288 preconditioner, or b) use upper limit on step length. YOU CAN'T USE BOTH!
3289 In this case algorithm will terminate with appropriate error code.
3290 
3291 INPUT PARAMETERS:
3292  State - structure which stores algorithm state
3293  StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't
3294  want to limit step length.
3295 
3296 Use this subroutine when you optimize target function which contains exp()
3297 or other fast growing functions, and optimization algorithm makes too
3298 large steps which lead to overflow. This function allows us to reject
3299 steps that are too large (and therefore expose us to the possible
3300 overflow) without actually calculating function value at the x+stp*d.
3301 
3302  -- ALGLIB --
3303  Copyright 02.04.2010 by Bochkanov Sergey
3304 *************************************************************************/
3305 void minbleicsetstpmax(const minbleicstate &state, const double stpmax);
3306 
3307 
3308 /*************************************************************************
3309 This function provides reverse communication interface
3310 Reverse communication interface is not documented or recommended to use.
3311 See below for functions which provide better documented API
3312 *************************************************************************/
3314 
3315 
3316 /*************************************************************************
3317 This family of functions is used to launch iterations of nonlinear optimizer
3318 
3319 These functions accept following parameters:
3320  state - algorithm state
3321  func - callback which calculates function (or merit function)
3322  value func at given point x
3323  grad - callback which calculates function (or merit function)
3324  value func and gradient grad at given point x
3325  rep - optional callback which is called after each iteration
3326  can be NULL
3327  ptr - optional pointer which is passed to func/grad/hess/jac/rep
3328  can be NULL
3329 
3330 NOTES:
3331 
3332 1. This function has two different implementations: one which uses exact
3333  (analytical) user-supplied gradient, and one which uses function value
3334  only and numerically differentiates function in order to obtain
3335  gradient.
3336 
3337  Depending on the specific function used to create optimizer object
3338  (either MinBLEICCreate() for analytical gradient or MinBLEICCreateF()
3339  for numerical differentiation) you should choose appropriate variant of
3340  MinBLEICOptimize() - one which accepts function AND gradient or one
3341  which accepts function ONLY.
3342 
3343  Be careful to choose variant of MinBLEICOptimize() which corresponds to
3344  your optimization scheme! Table below lists different combinations of
3345  callback (function/gradient) passed to MinBLEICOptimize() and specific
3346  function used to create optimizer.
3347 
3348 
3349  | USER PASSED TO MinBLEICOptimize()
3350  CREATED WITH | function only | function and gradient
3351  ------------------------------------------------------------
3352  MinBLEICCreateF() | work FAIL
3353  MinBLEICCreate() | FAIL work
3354 
3355  Here "FAIL" denotes inappropriate combinations of optimizer creation
3356  function and MinBLEICOptimize() version. Attempts to use such
3357  combination (for example, to create optimizer with MinBLEICCreateF()
3358  and to pass gradient information to MinCGOptimize()) will lead to
3359  exception being thrown. Either you did not pass gradient when it WAS
3360  needed or you passed gradient when it was NOT needed.
3361 
3362  -- ALGLIB --
3363  Copyright 28.11.2010 by Bochkanov Sergey
3364 
3365 *************************************************************************/
3367  void (*func)(const real_1d_array &x, double &func, void *ptr),
3368  void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
3369  void *ptr = NULL);
3371  void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
3372  void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
3373  void *ptr = NULL);
3374 
3375 
3376 /*************************************************************************
3377 BLEIC results
3378 
3379 INPUT PARAMETERS:
3380  State - algorithm state
3381 
3382 OUTPUT PARAMETERS:
3383  X - array[0..N-1], solution
3384  Rep - optimization report. You should check Rep.TerminationType
3385  in order to distinguish successful termination from
3386  unsuccessful one:
3387  * -8 internal integrity control detected infinite or
3388  NAN values in function/gradient. Abnormal
3389  termination signalled.
3390  * -7 gradient verification failed.
3391  See MinBLEICSetGradientCheck() for more information.
3392  * -3 inconsistent constraints. Feasible point is
3393  either nonexistent or too hard to find. Try to
3394  restart optimizer with better initial approximation
3395  * 1 relative function improvement is no more than EpsF.
3396  * 2 scaled step is no more than EpsX.
3397  * 4 scaled gradient norm is no more than EpsG.
3398  * 5 MaxIts steps was taken
3399  * 8 terminated by user who called minbleicrequesttermination().
3400  X contains point which was "current accepted" when
3401  termination request was submitted.
3402  More information about fields of this structure can be
3403  found in the comments on MinBLEICReport datatype.
3404 
3405  -- ALGLIB --
3406  Copyright 28.11.2010 by Bochkanov Sergey
3407 *************************************************************************/
3409 
3410 
3411 /*************************************************************************
3412 BLEIC results
3413 
3414 Buffered implementation of MinBLEICResults() which uses pre-allocated buffer
3415 to store X[]. If buffer size is too small, it resizes buffer. It is
3416 intended to be used in the inner cycles of performance critical algorithms
3417 where array reallocation penalty is too large to be ignored.
3418 
3419  -- ALGLIB --
3420  Copyright 28.11.2010 by Bochkanov Sergey
3421 *************************************************************************/
3423 
3424 
3425 /*************************************************************************
3426 This subroutine restarts algorithm from new point.
3427 All optimization parameters (including constraints) are left unchanged.
3428 
3429 This function allows to solve multiple optimization problems (which
3430 must have same number of dimensions) without object reallocation penalty.
3431 
3432 INPUT PARAMETERS:
3433  State - structure previously allocated with MinBLEICCreate call.
3434  X - new starting point.
3435 
3436  -- ALGLIB --
3437  Copyright 28.11.2010 by Bochkanov Sergey
3438 *************************************************************************/
3439 void minbleicrestartfrom(const minbleicstate &state, const real_1d_array &x);
3440 
3441 
3442 /*************************************************************************
3443 This subroutine submits request for termination of running optimizer. It
3444 should be called from user-supplied callback when user decides that it is
3445 time to "smoothly" terminate optimization process. As result, optimizer
3446 stops at point which was "current accepted" when termination request was
3447 submitted and returns error code 8 (successful termination).
3448 
3449 INPUT PARAMETERS:
3450  State - optimizer structure
3451 
3452 NOTE: after request for termination optimizer may perform several
3453  additional calls to user-supplied callbacks. It does NOT guarantee
3454  to stop immediately - it just guarantees that these additional calls
3455  will be discarded later.
3456 
3457 NOTE: calling this function on optimizer which is NOT running will have no
3458  effect.
3459 
3460 NOTE: multiple calls to this function are possible. First call is counted,
3461  subsequent calls are silently ignored.
3462 
3463  -- ALGLIB --
3464  Copyright 08.10.2014 by Bochkanov Sergey
3465 *************************************************************************/
3467 
3468 
3469 /*************************************************************************
3470 This subroutine turns on verification of the user-supplied analytic
3471 gradient:
3472 * user calls this subroutine before optimization begins
3473 * MinBLEICOptimize() is called
3474 * prior to actual optimization, for each component of parameters being
3475  optimized X[i] algorithm performs following steps:
3476  * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i],
3477  where X[i] is i-th component of the initial point and S[i] is a scale
3478  of i-th parameter
3479  * if needed, steps are bounded with respect to constraints on X[]
3480  * F(X) is evaluated at these trial points
3481  * we perform one more evaluation in the middle point of the interval
3482  * we build cubic model using function values and derivatives at trial
3483  points and we compare its prediction with actual value in the middle
3484  point
3485  * in case difference between prediction and actual value is higher than
3486  some predetermined threshold, algorithm stops with completion code -7;
3487  Rep.VarIdx is set to index of the parameter with incorrect derivative.
3488 * after verification is over, algorithm proceeds to the actual optimization.
3489 
3490 NOTE 1: verification needs N (parameters count) gradient evaluations. It
3491  is very costly and you should use it only for low dimensional
3492  problems, when you want to be sure that you've correctly
3493  calculated analytic derivatives. You should not use it in the
3494  production code (unless you want to check derivatives provided by
3495  some third party).
3496 
3497 NOTE 2: you should carefully choose TestStep. Value which is too large
3498  (so large that function behaviour is significantly non-cubic) will
3499  lead to false alarms. You may use different step for different
3500  parameters by means of setting scale with MinBLEICSetScale().
3501 
3502 NOTE 3: this function may lead to false positives. In case it reports that
3503  I-th derivative was calculated incorrectly, you may decrease test
3504  step and try one more time - maybe your function changes too
3505  sharply and your step is too large for such rapidly changing
3506  function.
3507 
3508 INPUT PARAMETERS:
3509  State - structure used to store algorithm state
3510  TestStep - verification step:
3511  * TestStep=0 turns verification off
3512  * TestStep>0 activates verification
3513 
3514  -- ALGLIB --
3515  Copyright 15.06.2012 by Bochkanov Sergey
3516 *************************************************************************/
3517 void minbleicsetgradientcheck(const minbleicstate &state, const double teststep);
3518 
3519 
3520 
3521 /*************************************************************************
3522  CONSTRAINED QUADRATIC PROGRAMMING
3523 
3524 The subroutine creates QP optimizer. After initial creation, it contains
3525 default optimization problem with zero quadratic and linear terms and no
3526 constraints. You should set quadratic/linear terms with calls to functions
3527 provided by MinQP subpackage.
3528 
3529 You should also choose appropriate QP solver and set it and its stopping
3530 criteria by means of MinQPSetAlgo??????() function. Then, you should start
3531 solution process by means of MinQPOptimize() call. Solution itself can be
3532 obtained with MinQPResults() function.
3533 
3534 Following solvers are recommended:
3535 * QuickQP for dense problems with box-only constraints (or no constraints
3536  at all)
3537 * QP-BLEIC for dense/sparse problems with moderate (up to 50) number of
3538  general linear constraints
3539 * DENSE-AUL-QP for dense problems with any (small or large) number of
3540  general linear constraints
3541 
3542 INPUT PARAMETERS:
3543  N - problem size
3544 
3545 OUTPUT PARAMETERS:
3546  State - optimizer with zero quadratic/linear terms
3547  and no constraints
3548 
3549  -- ALGLIB --
3550  Copyright 11.01.2011 by Bochkanov Sergey
3551 *************************************************************************/
3552 void minqpcreate(const ae_int_t n, minqpstate &state);
3553 
3554 
3555 /*************************************************************************
3556 This function sets linear term for QP solver.
3557 
3558 By default, linear term is zero.
3559 
3560 INPUT PARAMETERS:
3561  State - structure which stores algorithm state
3562  B - linear term, array[N].
3563 
3564  -- ALGLIB --
3565  Copyright 11.01.2011 by Bochkanov Sergey
3566 *************************************************************************/
3567 void minqpsetlinearterm(const minqpstate &state, const real_1d_array &b);
3568 
3569 
3570 /*************************************************************************
3571 This function sets dense quadratic term for QP solver. By default,
3572 quadratic term is zero.
3573 
3574 SUPPORT BY QP SOLVERS:
3575 
3576 Dense quadratic term can be handled by following QP solvers:
3577 * QuickQP
3578 * BLEIC-QP
3579 * Dense-AUL-QP
3580 
3581 IMPORTANT:
3582 
3583 This solver minimizes following function:
3584  f(x) = 0.5*x'*A*x + b'*x.
3585 Note that quadratic term has 0.5 before it. So if you want to minimize
3586  f(x) = x^2 + x
3587 you should rewrite your problem as follows:
3588  f(x) = 0.5*(2*x^2) + x
3589 and your matrix A will be equal to [[2.0]], not to [[1.0]]
3590 
3591 INPUT PARAMETERS:
3592  State - structure which stores algorithm state
3593  A - matrix, array[N,N]
3594  IsUpper - (optional) storage type:
3595  * if True, symmetric matrix A is given by its upper
3596  triangle, and the lower triangle isn't used
3597  * if False, symmetric matrix A is given by its lower
3598  triangle, and the upper triangle isn't used
3599  * if not given, both lower and upper triangles must be
3600  filled.
3601 
3602  -- ALGLIB --
3603  Copyright 11.01.2011 by Bochkanov Sergey
3604 *************************************************************************/
3605 void minqpsetquadraticterm(const minqpstate &state, const real_2d_array &a, const bool isupper);
3606 void minqpsetquadraticterm(const minqpstate &state, const real_2d_array &a);
3607 
3608 
3609 /*************************************************************************
3610 This function sets sparse quadratic term for QP solver. By default,
3611 quadratic term is zero. This function overrides previous calls to
3612 minqpsetquadraticterm() or minqpsetquadratictermsparse().
3613 
3614 SUPPORT BY QP SOLVERS:
3615 
3616 Sparse quadratic term can be handled by following QP solvers:
3617 * QuickQP
3618 * BLEIC-QP
3619 * Dense-AUL-QP (internally converts sparse matrix to dense format)
3620 
3621 IMPORTANT:
3622 
3623 This solver minimizes following function:
3624  f(x) = 0.5*x'*A*x + b'*x.
3625 Note that quadratic term has 0.5 before it. So if you want to minimize
3626  f(x) = x^2 + x
3627 you should rewrite your problem as follows:
3628  f(x) = 0.5*(2*x^2) + x
3629 and your matrix A will be equal to [[2.0]], not to [[1.0]]
3630 
3631 INPUT PARAMETERS:
3632  State - structure which stores algorithm state
3633  A - matrix, array[N,N]
3634  IsUpper - (optional) storage type:
3635  * if True, symmetric matrix A is given by its upper
3636  triangle, and the lower triangle isn't used
3637  * if False, symmetric matrix A is given by its lower
3638  triangle, and the upper triangle isn't used
3639  * if not given, both lower and upper triangles must be
3640  filled.
3641 
3642  -- ALGLIB --
3643  Copyright 11.01.2011 by Bochkanov Sergey
3644 *************************************************************************/
3645 void minqpsetquadratictermsparse(const minqpstate &state, const sparsematrix &a, const bool isupper);
3646 
3647 
3648 /*************************************************************************
3649 This function sets starting point for QP solver. It is useful to have
3650 good initial approximation to the solution, because it will increase
3651 speed of convergence and identification of active constraints.
3652 
3653 INPUT PARAMETERS:
3654  State - structure which stores algorithm state
3655  X - starting point, array[N].
3656 
3657  -- ALGLIB --
3658  Copyright 11.01.2011 by Bochkanov Sergey
3659 *************************************************************************/
3660 void minqpsetstartingpoint(const minqpstate &state, const real_1d_array &x);
3661 
3662 
3663 /*************************************************************************
3664 This function sets origin for QP solver. By default, following QP program
3665 is solved:
3666 
3667  min(0.5*x'*A*x+b'*x)
3668 
3669 This function allows to solve different problem:
3670 
3671  min(0.5*(x-x_origin)'*A*(x-x_origin)+b'*(x-x_origin))
3672 
3673 Specification of non-zero origin affects function being minimized, but not
3674 constraints. Box and linear constraints are still calculated without
3675 origin.
3676 
3677 INPUT PARAMETERS:
3678  State - structure which stores algorithm state
3679  XOrigin - origin, array[N].
3680 
3681  -- ALGLIB --
3682  Copyright 11.01.2011 by Bochkanov Sergey
3683 *************************************************************************/
3684 void minqpsetorigin(const minqpstate &state, const real_1d_array &xorigin);
3685 
3686 
3687 /*************************************************************************
3688 This function sets scaling coefficients.
3689 
3690 ALGLIB optimizers use scaling matrices to test stopping conditions (step
3691 size and gradient are scaled before comparison with tolerances) and as
3692 preconditioner.
3693 
3694 Scale of the I-th variable is a translation invariant measure of:
3695 a) "how large" the variable is
3696 b) how large the step should be to make significant changes in the function
3697 
3698 INPUT PARAMETERS:
3699  State - structure stores algorithm state
3700  S - array[N], non-zero scaling coefficients
3701  S[i] may be negative, sign doesn't matter.
3702 
3703  -- ALGLIB --
3704  Copyright 14.01.2011 by Bochkanov Sergey
3705 *************************************************************************/
3706 void minqpsetscale(const minqpstate &state, const real_1d_array &s);
3707 
3708 
3709 /*************************************************************************
3710 DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
3711 
3712 
3713 This function tells solver to use Cholesky-based algorithm. This algorithm
3714 was deprecated in ALGLIB 3.9.0 because its performance is inferior to that
3715 of BLEIC-QP or QuickQP on high-dimensional problems. Furthermore, it
3716 supports only dense convex QP problems.
3717 
3718 This solver is no longer active by default.
3719 
3720 We recommend you to switch to AUL-QP, BLEIC-QP or QuickQP solver.
3721 
3722 
3723 DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
3724 
3725  -- ALGLIB --
3726  Copyright 11.01.2011 by Bochkanov Sergey
3727 *************************************************************************/
3729 
3730 
3731 /*************************************************************************
3732 This function tells solver to use BLEIC-based algorithm and sets stopping
3733 criteria for the algorithm.
3734 
3735 ALGORITHM FEATURES:
3736 
3737 * supports dense and sparse QP problems
3738 * supports box and general linear equality/inequality constraints
3739 * can solve all types of problems (convex, semidefinite, nonconvex) as
3740  long as they are bounded from below under constraints.
3741  Say, it is possible to solve "min{-x^2} subject to -1<=x<=+1".
3742  Of course, global minimum is found only for positive definite and
3743  semidefinite problems. As for indefinite ones - only local minimum is
3744  found.
3745 
3746 ALGORITHM OUTLINE:
3747 
3748 * BLEIC-QP solver is just a driver function for MinBLEIC solver; it solves
3749  quadratic programming problem as general linearly constrained
3750  optimization problem, which is solved by means of BLEIC solver (part of
3751  ALGLIB, active set method).
3752 
3753 ALGORITHM LIMITATIONS:
3754 
3755 * this algorithm is fast enough for large-scale problems with small amount
3756  of general linear constraints (say, up to 50), but it is inefficient
3757  for problems with several hundreds of constraints. Iteration cost is
3758  roughly quadratic w.r.t. constraint count.
3759  Furthermore, it can not efficiently handle sparse constraints (they are
3760  converted to dense format prior to solution).
3761  Thus, if you have large and/or sparse constraint matrix and convex QP
3762  problem, Dense-AUL-QP solver may be better solution.
3763 * unlike QuickQP solver, this algorithm does not perform Newton steps and
3764  does not use Level 3 BLAS. Being general-purpose active set method, it
3765  can activate constraints only one-by-one. Thus, its performance is lower
3766  than that of QuickQP.
3767 * its precision is also a bit inferior to that of QuickQP. BLEIC-QP
3768  performs only LBFGS steps (no Newton steps), which are good at detecting
3769  neighborhood of the solution, but needs many iterations to find solution
3770  with more than 6 digits of precision.
3771 
3772 INPUT PARAMETERS:
3773  State - structure which stores algorithm state
3774  EpsG - >=0
3775  The subroutine finishes its work if the condition
3776  |v|<EpsG is satisfied, where:
3777  * |.| means Euclidean norm
3778  * v - scaled constrained gradient vector, v[i]=g[i]*s[i]
3779  * g - gradient
3780  * s - scaling coefficients set by MinQPSetScale()
3781  EpsF - >=0
3782  The subroutine finishes its work if exploratory steepest
3783  descent step on k+1-th iteration satisfies following
3784  condition: |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
3785  EpsX - >=0
3786  The subroutine finishes its work if exploratory steepest
3787  descent step on k+1-th iteration satisfies following
3788  condition:
3789  * |.| means Euclidean norm
3790  * v - scaled step vector, v[i]=dx[i]/s[i]
3791  * dx - step vector, dx=X(k+1)-X(k)
3792  * s - scaling coefficients set by MinQPSetScale()
3793  MaxIts - maximum number of iterations. If MaxIts=0, the number of
3794  iterations is unlimited. NOTE: this algorithm uses LBFGS
3795  iterations, which are relatively cheap, but improve
3796  function value only a bit. So you will need many iterations
3797  to converge - from 0.1*N to 10*N, depending on problem's
3798  condition number.
3799 
3800 IT IS VERY IMPORTANT TO CALL MinQPSetScale() WHEN YOU USE THIS ALGORITHM
3801 BECAUSE ITS STOPPING CRITERIA ARE SCALE-DEPENDENT!
3802 
3803 Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead
3804 to automatic stopping criterion selection (presently it is small step
3805 length, but it may change in the future versions of ALGLIB).
3806 
3807  -- ALGLIB --
3808  Copyright 11.01.2011 by Bochkanov Sergey
3809 *************************************************************************/
3810 void minqpsetalgobleic(const minqpstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits);
3811 
3812 
3813 /*************************************************************************
3814 This function tells QP solver to use Dense-AUL algorithm and sets stopping
3815 criteria for the algorithm.
3816 
3817 ALGORITHM FEATURES:
3818 
3819 * supports dense and sparse QP problems; although it uses dense Cholesky
3820  to build preconditioner, it still works faster for sparse problems.
3821 * supports box and dense/sparse general linear equality/inequality
3822  constraints
3823 * convergence is theoretically proved for positive-definite (convex) QP
3824  problems. Semidefinite and non-convex problems can be solved as long as
3825  they are bounded from below under constraints, although without
3826  theoretical guarantees.
3827 * this solver is better than QP-BLEIC on problems with large number of
3828  general linear constraints.
3829 
3830 ALGORITHM OUTLINE:
3831 
3832 * this algorithm is an augmented Lagrangian method with dense
3833  preconditioner (hence its name). It is similar to barrier/penalty
3834  methods, but much more precise and faster.
3835 * it performs several outer iterations in order to refine values of the
3836  Lagrange multipliers. Single outer iteration is a solution of some
3837  unconstrained optimization problem: first it performs dense Cholesky
3838  factorization of the Hessian in order to build preconditioner (adaptive
3839  regularization is applied to enforce positive definiteness), and then
3840  it uses L-BFGS optimizer to solve optimization problem.
3841 * typically you need about 5-10 outer iterations to converge to solution
3842 
3843 ALGORITHM LIMITATIONS:
3844 
3845 * because dense Cholesky driver is used, this algorithm has O(N^2) memory
3846  requirements and O(OuterIterations*N^3) minimum running time. From the
3847  practical point of view, it limits its applicability by several
3848  thousands of variables.
3849  From the other side, variables count is the most limiting factor,
3850  and dependence on constraint count is much lower. Assuming that
3851  constraint matrix is sparse, it may handle tens of thousands of general
3852  linear constraints.
3853 * its precision is lower than that of BLEIC-QP and QuickQP. It is hard to
3854  find solution with more than 6 digits of precision.
3855 
3856 INPUT PARAMETERS:
3857  State - structure which stores algorithm state
3858  EpsX - >=0, stopping criteria for inner optimizer.
3859  Inner iterations are stopped when step length (with
3860  variable scaling being applied) is less than EpsX.
3861  See minqpsetscale() for more information on variable
3862  scaling.
3863  Rho - penalty coefficient, Rho>0:
3864  * large enough that algorithm converges with desired
3865  precision.
3866  * not TOO large to prevent ill-conditioning
3867  * recommended values are 100, 1000 or 10000
3868  ItsCnt - number of outer iterations:
3869  * recommended values: 10-15 (although in most cases it
3870  converges within 5 iterations, you may need a few more
3871  to be sure).
3872  * ItsCnt=0 means that small number of outer iterations is
3873  automatically chosen (10 iterations in current version).
3874  * ItsCnt=1 means that AUL algorithm performs just as usual
3875  barrier method.
3876  * ItsCnt>1 means that AUL algorithm performs specified
3877  number of outer iterations
3878 
3879 IT IS VERY IMPORTANT TO CALL minqpsetscale() WHEN YOU USE THIS ALGORITHM
3880 BECAUSE ITS CONVERGENCE PROPERTIES AND STOPPING CRITERIA ARE SCALE-DEPENDENT!
3881 
3882 NOTE: Passing EpsX=0 will lead to automatic step length selection
3883  (specific step length chosen may change in the future versions of
3884  ALGLIB, so it is better to specify step length explicitly).
3885 
3886  -- ALGLIB --
3887  Copyright 20.08.2016 by Bochkanov Sergey
3888 *************************************************************************/
3889 void minqpsetalgodenseaul(const minqpstate &state, const double epsx, const double rho, const ae_int_t itscnt);
3890 
3891 
3892 /*************************************************************************
3893 This function tells solver to use QuickQP algorithm: special extra-fast
3894 algorithm for problems with box-only constraints. It may solve non-convex
3895 problems as long as they are bounded from below under constraints.
3896 
3897 ALGORITHM FEATURES:
3898 * many times (from 5x to 50x!) faster than BLEIC-based QP solver; utilizes
3899  accelerated methods for activation of constraints.
3900 * supports dense and sparse QP problems
3901 * supports ONLY box constraints; general linear constraints are NOT
3902  supported by this solver
3903 * can solve all types of problems (convex, semidefinite, nonconvex) as
3904  long as they are bounded from below under constraints.
3905  Say, it is possible to solve "min{-x^2} subject to -1<=x<=+1".
3906  In convex/semidefinite case global minimum is returned, in nonconvex
3907  case - algorithm returns one of the local minimums.
3908 
3909 ALGORITHM OUTLINE:
3910 
3911 * algorithm performs two kinds of iterations: constrained CG iterations
3912  and constrained Newton iterations
3913 * initially it performs small number of constrained CG iterations, which
3914  can efficiently activate/deactivate multiple constraints
3915 * after CG phase algorithm tries to calculate Cholesky decomposition and
3916  to perform several constrained Newton steps. If Cholesky decomposition
3917  failed (matrix is indefinite even under constraints), we perform more
3918  CG iterations until we converge to such set of constraints that system
3919  matrix becomes positive definite. Constrained Newton steps greatly
3920  increase convergence speed and precision.
3921 * algorithm interleaves CG and Newton iterations which allows to handle
3922  indefinite matrices (CG phase) and quickly converge after final set of
3923  constraints is found (Newton phase). Combination of CG and Newton phases
3924  is called "outer iteration".
3925 * it is possible to turn off Newton phase (beneficial for semidefinite
3926  problems - Cholesky decomposition will fail too often)
3927 
3928 ALGORITHM LIMITATIONS:
3929 
3930 * algorithm does not support general linear constraints; only box ones
3931  are supported
3932 * Cholesky decomposition for sparse problems is performed with Skyline
3933  Cholesky solver, which is intended for low-profile matrices. No profile-
3934  reducing reordering of variables is performed in this version of ALGLIB.
3935 * problems with near-zero negative eigenvalues (or exactly zero ones) may
3936  experience about 2-3x performance penalty. The reason is that Cholesky
3937  decomposition can not be performed until we identify directions of zero
3938  and negative curvature and activate corresponding boundary constraints -
3939  but we need a lot of trial and errors because these directions are hard
3940  to notice in the matrix spectrum.
3941  In this case you may turn off Newton phase of algorithm.
3942  Large negative eigenvalues are not an issue, so highly non-convex
3943  problems can be solved very efficiently.
3944 
3945 INPUT PARAMETERS:
3946  State - structure which stores algorithm state
3947  EpsG - >=0
3948  The subroutine finishes its work if the condition
3949  |v|<EpsG is satisfied, where:
3950  * |.| means Euclidian norm
3951  * v - scaled constrained gradient vector, v[i]=g[i]*s[i]
3952  * g - gradient
3953  * s - scaling coefficients set by MinQPSetScale()
3954  EpsF - >=0
3955  The subroutine finishes its work if exploratory steepest
3956  descent step on k+1-th iteration satisfies following
3957  condition: |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
3958  EpsX - >=0
3959  The subroutine finishes its work if exploratory steepest
3960  descent step on k+1-th iteration satisfies following
3961  condition |v|<=EpsX, where:
3962  * |.| means Euclidian norm
3963  * v - scaled step vector, v[i]=dx[i]/s[i]
3964  * dx - step vector, dx=X(k+1)-X(k)
3965  * s - scaling coefficients set by MinQPSetScale()
3966  MaxOuterIts-maximum number of OUTER iterations. One outer iteration
3967  includes some amount of CG iterations (from 5 to ~N) and
3968  one or several (usually small amount) Newton steps. Thus,
3969  one outer iteration has high cost, but can greatly reduce
3970  function value.
3971  Use 0 if you do not want to limit number of outer iterations.
3972  UseNewton- use Newton phase or not:
3973  * Newton phase improves performance of positive definite
3974  dense problems (about 2 times improvement can be observed)
3975  * can result in some performance penalty on semidefinite
3976  or slightly negative definite problems - each Newton
3977  phase will bring no improvement (Cholesky failure), but
3978  still will require computational time.
3979  * if you doubt, you can turn off this phase - optimizer
3980  will retain most of its high speed.
3981 
3982 IT IS VERY IMPORTANT TO CALL MinQPSetScale() WHEN YOU USE THIS ALGORITHM
3983 BECAUSE ITS STOPPING CRITERIA ARE SCALE-DEPENDENT!
3984 
3985 Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead
3986 to automatic stopping criterion selection (presently it is small step
3987 length, but it may change in the future versions of ALGLIB).
3988 
3989  -- ALGLIB --
3990  Copyright 22.05.2014 by Bochkanov Sergey
3991 *************************************************************************/
3992 void minqpsetalgoquickqp(const minqpstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxouterits, const bool usenewton);
3993 
3994 
3995 /*************************************************************************
3996 This function sets box constraints for QP solver
3997 
3998 Box constraints are inactive by default (after initial creation). After
3999 being set, they are preserved until explicitly turned off with another
4000 SetBC() call.
4001 
4002 All QP solvers may handle box constraints.
4003 
4004 INPUT PARAMETERS:
4005  State - structure stores algorithm state
4006  BndL - lower bounds, array[N].
4007  If some (all) variables are unbounded, you may specify
4008  very small number or -INF (latter is recommended because
4009  it will allow solver to use better algorithm).
4010  BndU - upper bounds, array[N].
4011  If some (all) variables are unbounded, you may specify
4012  very large number or +INF (latter is recommended because
4013  it will allow solver to use better algorithm).
4014 
4015 NOTE: it is possible to specify BndL[i]=BndU[i]. In this case I-th
4016 variable will be "frozen" at X[i]=BndL[i]=BndU[i].
4017 
4018  -- ALGLIB --
4019  Copyright 11.01.2011 by Bochkanov Sergey
4020 *************************************************************************/
4021 void minqpsetbc(const minqpstate &state, const real_1d_array &bndl, const real_1d_array &bndu);
4022 
4023 
4024 /*************************************************************************
4025 This function sets dense linear constraints for QP optimizer.
4026 
4027 This function overrides results of previous calls to minqpsetlc(),
4028 minqpsetlcsparse() and minqpsetlcmixed(). After call to this function
4029 sparse constraints are dropped, and you have only those constraints which
4030 were specified in the present call.
4031 
4032 If you want to specify mixed (with dense and sparse terms) linear
4033 constraints, you should call minqpsetlcmixed().
4034 
4035 SUPPORT BY QP SOLVERS:
4036 
4037 Following QP solvers can handle dense linear constraints:
4038 * BLEIC-QP - handles them with high precision, but may be
4039  inefficient for problems with hundreds of constraints
4040 * Dense-AUL-QP - handles them with moderate precision (approx. 10^-6),
4041  may efficiently handle thousands of constraints.
4042 
4043 Following QP solvers can NOT handle dense linear constraints:
4044 * QuickQP - can not handle general linear constraints
4045 
4046 INPUT PARAMETERS:
4047  State - structure previously allocated with MinQPCreate call.
4048  C - linear constraints, array[K,N+1].
4049  Each row of C represents one constraint, either equality
4050  or inequality (see below):
4051  * first N elements correspond to coefficients,
4052  * last element corresponds to the right part.
4053  All elements of C (including right part) must be finite.
4054  CT - type of constraints, array[K]:
4055  * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1]
4056  * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1]
4057  * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1]
4058  K - number of equality/inequality constraints, K>=0:
4059  * if given, only leading K elements of C/CT are used
4060  * if not given, automatically determined from sizes of C/CT
4061 
4062 NOTE 1: linear (non-bound) constraints are satisfied only approximately -
4063  there always exists some violation due to numerical errors and
4064  algorithmic limitations (BLEIC-QP solver is most precise, AUL-QP
4065  solver is less precise).
4066 
4067  -- ALGLIB --
4068  Copyright 19.06.2012 by Bochkanov Sergey
4069 *************************************************************************/
4070 void minqpsetlc(const minqpstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k);
4071 void minqpsetlc(const minqpstate &state, const real_2d_array &c, const integer_1d_array &ct);
4072 
4073 
4074 /*************************************************************************
4075 This function sets sparse linear constraints for QP optimizer.
4076 
4077 This function overrides results of previous calls to minqpsetlc(),
4078 minqpsetlcsparse() and minqpsetlcmixed(). After call to this function
4079 dense constraints are dropped, and you have only those constraints which
4080 were specified in the present call.
4081 
4082 If you want to specify mixed (with dense and sparse terms) linear
4083 constraints, you should call minqpsetlcmixed().
4084 
4085 SUPPORT BY QP SOLVERS:
4086 
4087 Following QP solvers can handle sparse linear constraints:
4088 * BLEIC-QP - handles them with high precision, but can not
4089  utilize their sparsity - sparse constraint matrix
4090  is silently converted to dense format. Thus, it
4091  may be inefficient for problems with hundreds of
4092  constraints.
4093 * Dense-AUL-QP - although this solver uses dense linear algebra to
4094  calculate Cholesky preconditioner, it may
4095  efficiently handle sparse constraints. It may
4096  solve problems with hundreds and thousands of
4097  constraints. The only drawback is that precision
4098  of constraint handling is typically within 1E-4...
4099  ..1E-6 range.
4100 
4101 Following QP solvers can NOT handle sparse linear constraints:
4102 * QuickQP - can not handle general linear constraints
4103 
4104 INPUT PARAMETERS:
4105  State - structure previously allocated with MinQPCreate call.
4106  C - linear constraints, sparse matrix with dimensions at
4107  least [K,N+1]. If matrix has larger size, only leading
4108  Kx(N+1) rectangle is used.
4109  Each row of C represents one constraint, either equality
4110  or inequality (see below):
4111  * first N elements correspond to coefficients,
4112  * last element corresponds to the right part.
4113  All elements of C (including right part) must be finite.
4114  CT - type of constraints, array[K]:
4115  * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1]
4116  * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1]
4117  * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1]
4118  K - number of equality/inequality constraints, K>=0
4119 
4120 NOTE 1: linear (non-bound) constraints are satisfied only approximately -
4121  there always exists some violation due to numerical errors and
4122  algorithmic limitations (BLEIC-QP solver is most precise, AUL-QP
4123  solver is less precise).
4124 
4125  -- ALGLIB --
4126  Copyright 22.08.2016 by Bochkanov Sergey
4127 *************************************************************************/
4128 void minqpsetlcsparse(const minqpstate &state, const sparsematrix &c, const integer_1d_array &ct, const ae_int_t k);
4129 
4130 
4131 /*************************************************************************
4132 This function sets mixed linear constraints, which include a set of dense
4133 rows, and a set of sparse rows.
4134 
4135 This function overrides results of previous calls to minqpsetlc(),
4136 minqpsetlcsparse() and minqpsetlcmixed().
4137 
4138 This function may be useful if constraint matrix includes large number of
4139 both types of rows - dense and sparse. If you have just a few sparse rows,
4140 you may represent them in dense format without losing performance.
4141 Similarly, if you have just a few dense rows, you may store them in sparse
4142 format with almost same performance.
4143 
4144 SUPPORT BY QP SOLVERS:
4145 
4146 Following QP solvers can handle mixed dense/sparse linear constraints:
4147 * BLEIC-QP - handles them with high precision, but can not
4148  utilize their sparsity - sparse constraint matrix
4149  is silently converted to dense format. Thus, it
4150  may be inefficient for problems with hundreds of
4151  constraints.
4152 * Dense-AUL-QP - although this solver uses dense linear algebra to
4153  calculate Cholesky preconditioner, it may
4154  efficiently handle sparse constraints. It may
4155  solve problems with hundreds and thousands of
4156  constraints. The only drawback is that precision
4157  of constraint handling is typically within 1E-4...
4158  ..1E-6 range.
4159 
4160 Following QP solvers can NOT handle mixed linear constraints:
4161 * QuickQP - can not handle general linear constraints at all
4162 
4163 INPUT PARAMETERS:
4164  State - structure previously allocated with MinQPCreate call.
4165  DenseC - dense linear constraints, array[K,N+1].
4166  Each row of DenseC represents one constraint, either equality
4167  or inequality (see below):
4168  * first N elements correspond to coefficients,
4169  * last element corresponds to the right part.
4170  All elements of DenseC (including right part) must be finite.
4171  DenseCT - type of constraints, array[K]:
4172  * if DenseCT[i]>0, then I-th constraint is DenseC[i,*]*x >= DenseC[i,n+1]
4173  * if DenseCT[i]=0, then I-th constraint is DenseC[i,*]*x = DenseC[i,n+1]
4174  * if DenseCT[i]<0, then I-th constraint is DenseC[i,*]*x <= DenseC[i,n+1]
4175  DenseK - number of equality/inequality constraints, DenseK>=0
4176  SparseC - linear constraints, sparse matrix with dimensions at
4177  least [SparseK,N+1]. If matrix has larger size, only leading
4178  SPARSEKx(N+1) rectangle is used.
4179  Each row of C represents one constraint, either equality
4180  or inequality (see below):
4181  * first N elements correspond to coefficients,
4182  * last element corresponds to the right part.
4183  All elements of C (including right part) must be finite.
4184  SparseCT- type of sparse constraints, array[K]:
4185  * if SparseCT[i]>0, then I-th constraint is SparseC[i,*]*x >= SparseC[i,n+1]
4186  * if SparseCT[i]=0, then I-th constraint is SparseC[i,*]*x = SparseC[i,n+1]
4187  * if SparseCT[i]<0, then I-th constraint is SparseC[i,*]*x <= SparseC[i,n+1]
4188  SparseK - number of sparse equality/inequality constraints, SparseK>=0
4189 
4190 NOTE 1: linear (non-bound) constraints are satisfied only approximately -
4191  there always exists some violation due to numerical errors and
4192  algorithmic limitations (BLEIC-QP solver is most precise, AUL-QP
4193  solver is less precise).
4194 
4195  -- ALGLIB --
4196  Copyright 22.08.2016 by Bochkanov Sergey
4197 *************************************************************************/
4198 void minqpsetlcmixed(const minqpstate &state, const real_2d_array &densec, const integer_1d_array &densect, const ae_int_t densek, const sparsematrix &sparsec, const integer_1d_array &sparsect, const ae_int_t sparsek);
4199 
4200 
4201 /*************************************************************************
4202 This function solves quadratic programming problem.
4203 
4204 Prior to calling this function you should choose solver by means of one of
4205 the following functions:
4206 
4207 * minqpsetalgoquickqp() - for QuickQP solver
4208 * minqpsetalgobleic() - for BLEIC-QP solver
4209 * minqpsetalgodenseaul() - for Dense-AUL-QP solver
4210 
4211 These functions also allow you to control stopping criteria of the solver.
4212 If you did not set solver, MinQP subpackage will automatically select
4213 solver for your problem and will run it with default stopping criteria.
4214 
4215 However, it is better to set explicitly solver and its stopping criteria.
4216 
4217 INPUT PARAMETERS:
4218  State - algorithm state
4219 
4220 You should use MinQPResults() function to access results after calls
4221 to this function.
4222 
4223  -- ALGLIB --
4224  Copyright 11.01.2011 by Bochkanov Sergey.
4225  Special thanks to Elvira Illarionova for important suggestions on
4226  the linearly constrained QP algorithm.
4227 *************************************************************************/
4228 void minqpoptimize(const minqpstate &state);
4229 
4230 
4231 /*************************************************************************
4232 QP solver results
4233 
4234 INPUT PARAMETERS:
4235  State - algorithm state
4236 
4237 OUTPUT PARAMETERS:
4238  X - array[0..N-1], solution.
4239  This array is allocated and initialized only when
4240  Rep.TerminationType parameter is positive (success).
4241  Rep - optimization report. You should check Rep.TerminationType,
4242  which contains completion code, and you may check another
4243  fields which contain another information about algorithm
4244  functioning.
4245 
4246  Failure codes returned by algorithm are:
4247  * -5 inappropriate solver was used:
4248  * QuickQP solver for problem with general linear
4249  constraints
4250  * -4 BLEIC-QP/QuickQP solver found unconstrained
4251  direction of negative curvature (function is
4252  unbounded from below even under constraints), no
4253  meaningful minimum can be found.
4254  * -3 inconsistent constraints (or maybe feasible point
4255  is too hard to find). If you are sure that
4256  constraints are feasible, try to restart optimizer
4257  with better initial approximation.
4258 
4259  Completion codes specific for Cholesky algorithm:
4260  * 4 successful completion
4261 
4262  Completion codes specific for BLEIC/QuickQP algorithms:
4263  * 1 relative function improvement is no more than EpsF.
4264  * 2 scaled step is no more than EpsX.
4265  * 4 scaled gradient norm is no more than EpsG.
4266  * 5 MaxIts steps were taken
4267 
4268  -- ALGLIB --
4269  Copyright 11.01.2011 by Bochkanov Sergey
4270 *************************************************************************/
4271 void minqpresults(const minqpstate &state, real_1d_array &x, minqpreport &rep);
4272 
4273 
4274 /*************************************************************************
4275 QP results
4276 
4277 Buffered implementation of MinQPResults() which uses pre-allocated buffer
4278 to store X[]. If buffer size is too small, it resizes buffer. It is
4279 intended to be used in the inner cycles of performance critical algorithms
4280 where array reallocation penalty is too large to be ignored.
4281 
4282  -- ALGLIB --
4283  Copyright 11.01.2011 by Bochkanov Sergey
4284 *************************************************************************/
4285 void minqpresultsbuf(const minqpstate &state, real_1d_array &x, minqpreport &rep);
4286 
4287 /*************************************************************************
4288  NONLINEARLY CONSTRAINED OPTIMIZATION
4289  WITH PRECONDITIONED AUGMENTED LAGRANGIAN ALGORITHM
4290 
4291 DESCRIPTION:
4292 The subroutine minimizes function F(x) of N arguments subject to any
4293 combination of:
4294 * bound constraints
4295 * linear inequality constraints
4296 * linear equality constraints
4297 * nonlinear equality constraints Gi(x)=0
4298 * nonlinear inequality constraints Hi(x)<=0
4299 
4300 REQUIREMENTS:
4301 * user must provide function value and gradient for F(), H(), G()
4302 * starting point X0 must be feasible or not too far away from the feasible
4303  set
4304 * F(), G(), H() are twice continuously differentiable on the feasible set
4305  and its neighborhood
4306 * nonlinear constraints G() and H() must have non-zero gradient at G(x)=0
4307  and at H(x)=0. Say, constraint like x^2>=1 is supported, but x^2>=0 is
4308  NOT supported.
4309 
4310 USAGE:
4311 
4312 Constrained optimization is far more complex than the unconstrained one.
4313 Nonlinearly constrained optimization is one of the most esoteric numerical
4314 procedures.
4315 
4316 Here we give very brief outline of the MinNLC optimizer. We strongly
4317 recommend you to study examples in the ALGLIB Reference Manual and to read
4318 ALGLIB User Guide on optimization, which is available at
4319 http://www.alglib.net/optimization/
4320 
4321 1. User initializes algorithm state with MinNLCCreate() call and chooses
4322  what NLC solver to use. There is some solver which is used by default,
4323  with default settings, but you should NOT rely on default choice. It
4324  may change in future releases of ALGLIB without notice, and no one can
4325  guarantee that new solver will be able to solve your problem with
4326  default settings.
4327 
4328  From the other side, if you choose solver explicitly, you can be pretty
4329  sure that it will work with new ALGLIB releases.
4330 
4331  In the current release following solvers can be used:
4332  * AUL solver (activated with MinNLCSetAlgoAUL() function)
4333 
4334 2. User adds boundary and/or linear and/or nonlinear constraints by means
4335  of calling one of the following functions:
4336  a) MinNLCSetBC() for boundary constraints
4337  b) MinNLCSetLC() for linear constraints
4338  c) MinNLCSetNLC() for nonlinear constraints
4339  You may combine (a), (b) and (c) in one optimization problem.
4340 
4341 3. User sets scale of the variables with MinNLCSetScale() function. It is
4342  VERY important to set scale of the variables, because nonlinearly
4343  constrained problems are hard to solve when variables are badly scaled.
4344 
4345 4. User sets stopping conditions with MinNLCSetCond(). If NLC solver
4346  uses inner/outer iteration layout, this function sets stopping
4347  conditions for INNER iterations.
4348 
4349 5. User chooses one of the preconditioning methods. Preconditioning is
4350  very important for efficient handling of boundary/linear/nonlinear
4351  constraints. Without preconditioning algorithm would require thousands
4352  of iterations even for simple problems. Several preconditioners can be
4353  used:
4354  a) inexact LBFGS-based, with O(N*K) evaluation time
4355  b) exact low rank one, with O(N*K^2) evaluation time
4356  c) exact robust one, with O(N^3+K*N^2) evaluation time
4357  where K is a total number of general linear and nonlinear constraints
4358  (box ones are not counted).
4359  Since version 3.11.0 ALGLIB uses exact robust preconditioner as default
4360  option, but in some cases exact low rank one may be better option.
4361 
4362 6. Finally, user calls MinNLCOptimize() function which takes algorithm
4363  state and pointer (delegate, etc.) to callback function which calculates
4364  F/G/H.
4365 
4366 7. User calls MinNLCResults() to get solution
4367 
4368 8. Optionally user may call MinNLCRestartFrom() to solve another problem
4369  with same N but another starting point. MinNLCRestartFrom() allows to
4370  reuse already initialized structure.
4371 
4372 
4373 INPUT PARAMETERS:
4374  N - problem dimension, N>0:
4375  * if given, only leading N elements of X are used
4376  * if not given, automatically determined from size of X
4377  X - starting point, array[N]:
4378  * it is better to set X to a feasible point
4379  * but X can be infeasible, in which case algorithm will try
4380  to find feasible point first, using X as initial
4381  approximation.
4382 
4383 OUTPUT PARAMETERS:
4384  State - structure stores algorithm state
4385 
4386  -- ALGLIB --
4387  Copyright 06.06.2014 by Bochkanov Sergey
4388 *************************************************************************/
4389 void minnlccreate(const ae_int_t n, const real_1d_array &x, minnlcstate &state);
4390 void minnlccreate(const real_1d_array &x, minnlcstate &state);
4391 
4392 
4393 /*************************************************************************
4394 This subroutine is a finite difference variant of MinNLCCreate(). It uses
4395 finite differences in order to differentiate target function.
4396 
4397 Description below contains information which is specific to this function
4398 only. We recommend to read comments on MinNLCCreate() in order to get more
4399 information about creation of NLC optimizer.
4400 
4401 INPUT PARAMETERS:
4402  N - problem dimension, N>0:
4403  * if given, only leading N elements of X are used
4404  * if not given, automatically determined from size of X
4405  X - starting point, array[N]:
4406  * it is better to set X to a feasible point
4407  * but X can be infeasible, in which case algorithm will try
4408  to find feasible point first, using X as initial
4409  approximation.
4410  DiffStep- differentiation step, >0
4411 
4412 OUTPUT PARAMETERS:
4413  State - structure stores algorithm state
4414 
4415 NOTES:
4416 1. algorithm uses 4-point central formula for differentiation.
4417 2. differentiation step along I-th axis is equal to DiffStep*S[I] where
4418  S[] is scaling vector which can be set by MinNLCSetScale() call.
4419 3. we recommend you to use moderate values of differentiation step. Too
4420  large step will result in too large TRUNCATION errors, while too small
4421  step will result in too large NUMERICAL errors. 1.0E-4 can be good
4422  value to start from.
4423 4. Numerical differentiation is very inefficient - one gradient
4424  calculation needs 4*N function evaluations. This function will work for
4425  any N - either small (1...10), moderate (10...100) or large (100...).
4426  However, performance penalty will be too severe for any N's except for
4427  small ones.
4428  We should also say that code which relies on numerical differentiation
4429  is less robust and precise. Imprecise gradient may slow down
4430  convergence, especially on highly nonlinear problems.
4431  Thus we recommend to use this function for fast prototyping on small-
4432  dimensional problems only, and to implement analytical gradient as soon
4433  as possible.
4434 
4435  -- ALGLIB --
4436  Copyright 06.06.2014 by Bochkanov Sergey
4437 *************************************************************************/
4438 void minnlccreatef(const ae_int_t n, const real_1d_array &x, const double diffstep, minnlcstate &state);
4439 void minnlccreatef(const real_1d_array &x, const double diffstep, minnlcstate &state);
4440 
4441 
4442 /*************************************************************************
4443 This function sets boundary constraints for NLC optimizer.
4444 
4445 Boundary constraints are inactive by default (after initial creation).
4446 They are preserved after algorithm restart with MinNLCRestartFrom().
4447 
4448 You may combine boundary constraints with general linear ones - and with
4449 nonlinear ones! Boundary constraints are handled more efficiently than
4450 other types. Thus, if your problem has mixed constraints, you may
4451 explicitly specify some of them as boundary and save some time/space.
4452 
4453 INPUT PARAMETERS:
4454  State - structure stores algorithm state
4455  BndL - lower bounds, array[N].
4456  If some (all) variables are unbounded, you may specify
4457  very small number or -INF.
4458  BndU - upper bounds, array[N].
4459  If some (all) variables are unbounded, you may specify
4460  very large number or +INF.
4461 
4462 NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th
4463 variable will be "frozen" at X[i]=BndL[i]=BndU[i].
4464 
4465 NOTE 2: when you solve your problem with augmented Lagrangian solver,
4466  boundary constraints are satisfied only approximately! It is
4467  possible that algorithm will evaluate function outside of
4468  feasible area!
4469 
4470  -- ALGLIB --
4471  Copyright 06.06.2014 by Bochkanov Sergey
4472 *************************************************************************/
4473 void minnlcsetbc(const minnlcstate &state, const real_1d_array &bndl, const real_1d_array &bndu);
4474 
4475 
4476 /*************************************************************************
4477 This function sets linear constraints for MinNLC optimizer.
4478 
4479 Linear constraints are inactive by default (after initial creation). They
4480 are preserved after algorithm restart with MinNLCRestartFrom().
4481 
4482 You may combine linear constraints with boundary ones - and with nonlinear
4483 ones! If your problem has mixed constraints, you may explicitly specify
4484 some of them as linear. It may help optimizer to handle them more
4485 efficiently.
4486 
4487 INPUT PARAMETERS:
4488  State - structure previously allocated with MinNLCCreate call.
4489  C - linear constraints, array[K,N+1].
4490  Each row of C represents one constraint, either equality
4491  or inequality (see below):
4492  * first N elements correspond to coefficients,
4493  * last element corresponds to the right part.
4494  All elements of C (including right part) must be finite.
4495  CT - type of constraints, array[K]:
4496  * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1]
4497  * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1]
4498  * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1]
4499  K - number of equality/inequality constraints, K>=0:
4500  * if given, only leading K elements of C/CT are used
4501  * if not given, automatically determined from sizes of C/CT
4502 
4503 NOTE 1: when you solve your problem with augmented Lagrangian solver,
4504  linear constraints are satisfied only approximately! It is
4505  possible that algorithm will evaluate function outside of
4506  feasible area!
4507 
4508  -- ALGLIB --
4509  Copyright 06.06.2014 by Bochkanov Sergey
4510 *************************************************************************/
4511 void minnlcsetlc(const minnlcstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k);
4512 void minnlcsetlc(const minnlcstate &state, const real_2d_array &c, const integer_1d_array &ct);
4513 
4514 
4515 /*************************************************************************
4516 This function sets nonlinear constraints for MinNLC optimizer.
4517 
4518 In fact, this function sets NUMBER of nonlinear constraints. Constraints
4519 itself (constraint functions) are passed to MinNLCOptimize() method. This
4520 method requires user-defined vector function F[] and its Jacobian J[],
4521 where:
4522 * first component of F[] and first row of Jacobian J[] corresponds to
4523  function being minimized
4524 * next NLEC components of F[] (and rows of J) correspond to nonlinear
4525  equality constraints G_i(x)=0
4526 * next NLIC components of F[] (and rows of J) correspond to nonlinear
4527  inequality constraints H_i(x)<=0
4528 
4529 NOTE: you may combine nonlinear constraints with linear/boundary ones. If
4530  your problem has mixed constraints, you may explicitly specify some
4531  of them as linear ones. It may help optimizer to handle them more
4532  efficiently.
4533 
4534 INPUT PARAMETERS:
4535  State - structure previously allocated with MinNLCCreate call.
4536  NLEC - number of Non-Linear Equality Constraints (NLEC), >=0
4537  NLIC - number of Non-Linear Inequality Constraints (NLIC), >=0
4538 
4539 NOTE 1: when you solve your problem with augmented Lagrangian solver,
4540  nonlinear constraints are satisfied only approximately! It is
4541  possible that algorithm will evaluate function outside of
4542  feasible area!
4543 
4544 NOTE 2: algorithm scales variables according to scale specified by
4545  MinNLCSetScale() function, so it can handle problems with badly
4546  scaled variables (as long as we KNOW their scales).
4547 
4548  However, there is no way to automatically scale nonlinear
4549  constraints Gi(x) and Hi(x). Inappropriate scaling of Gi/Hi may
4550  ruin convergence. Solving problem with constraint "1000*G0(x)=0"
4551  is NOT same as solving it with constraint "0.001*G0(x)=0".
4552 
4553  It means that YOU are the one who is responsible for correct
4554  scaling of nonlinear constraints Gi(x) and Hi(x). We recommend you
4555  to scale nonlinear constraints in such way that I-th component of
4556  dG/dX (or dH/dx) has approximately unit magnitude (for problems
4557  with unit scale) or has magnitude approximately equal to 1/S[i]
4558  (where S is a scale set by MinNLCSetScale() function).
4559 
4560 
4561  -- ALGLIB --
4562  Copyright 06.06.2014 by Bochkanov Sergey
4563 *************************************************************************/
4564 void minnlcsetnlc(const minnlcstate &state, const ae_int_t nlec, const ae_int_t nlic);
4565 
4566 
4567 /*************************************************************************
4568 This function sets stopping conditions for inner iterations of optimizer.
4569 
4570 INPUT PARAMETERS:
4571  State - structure which stores algorithm state
4572  EpsG - >=0
4573  The subroutine finishes its work if the condition
4574  |v|<EpsG is satisfied, where:
4575  * |.| means Euclidian norm
4576  * v - scaled gradient vector, v[i]=g[i]*s[i]
4577  * g - gradient
4578  * s - scaling coefficients set by MinNLCSetScale()
4579  EpsF - >=0
4580  The subroutine finishes its work if on k+1-th iteration
4581  the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
4582  is satisfied.
4583  EpsX - >=0
4584  The subroutine finishes its work if on k+1-th iteration
4585  the condition |v|<=EpsX is fulfilled, where:
4586  * |.| means Euclidean norm
4587  * v - scaled step vector, v[i]=dx[i]/s[i]
4588  * dx - step vector, dx=X(k+1)-X(k)
4589  * s - scaling coefficients set by MinNLCSetScale()
4590  MaxIts - maximum number of iterations. If MaxIts=0, the number of
4591  iterations is unlimited.
4592 
4593 Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead
4594 to automatic stopping criterion selection.
4595 
4596  -- ALGLIB --
4597  Copyright 06.06.2014 by Bochkanov Sergey
4598 *************************************************************************/
4599 void minnlcsetcond(const minnlcstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits);
4600 
4601 
4602 /*************************************************************************
4603 This function sets scaling coefficients for NLC optimizer.
4604 
4605 ALGLIB optimizers use scaling matrices to test stopping conditions (step
4606 size and gradient are scaled before comparison with tolerances). Scale of
4607 the I-th variable is a translation invariant measure of:
4608 a) "how large" the variable is
4609 b) how large the step should be to make significant changes in the function
4610 
4611 Scaling is also used by finite difference variant of the optimizer - step
4612 along I-th axis is equal to DiffStep*S[I].
4613 
4614 INPUT PARAMETERS:
4615  State - structure stores algorithm state
4616  S - array[N], non-zero scaling coefficients
4617  S[i] may be negative, sign doesn't matter.
4618 
4619  -- ALGLIB --
4620  Copyright 06.06.2014 by Bochkanov Sergey
4621 *************************************************************************/
4622 void minnlcsetscale(const minnlcstate &state, const real_1d_array &s);
4623 
4624 
4625 /*************************************************************************
4626 This function sets preconditioner to "inexact LBFGS-based" mode.
4627 
4628 Preconditioning is very important for convergence of Augmented Lagrangian
4629 algorithm because presence of penalty term makes problem ill-conditioned.
4630 Difference between performance of preconditioned and unpreconditioned
4631 methods can be as large as 100x!
4632 
4633 MinNLC optimizer may use following preconditioners, each with its own
4634 benefits and drawbacks:
4635  a) inexact LBFGS-based, with O(N*K) evaluation time
4636  b) exact low rank one, with O(N*K^2) evaluation time
4637  c) exact robust one, with O(N^3+K*N^2) evaluation time
4638 where K is a total number of general linear and nonlinear constraints (box
4639 ones are not counted).
4640 
4641 Inexact LBFGS-based preconditioner uses L-BFGS formula combined with
4642 orthogonality assumption to perform very fast updates. For a N-dimensional
4643 problem with K general linear or nonlinear constraints (boundary ones are
4644 not counted) it has O(N*K) cost per iteration. This preconditioner has
4645 best quality (less iterations) when general linear and nonlinear
4646 constraints are orthogonal to each other (orthogonality with respect to
4647 boundary constraints is not required). Number of iterations increases when
4648 constraints are non-orthogonal, because algorithm assumes orthogonality,
4649 but still it is better than no preconditioner at all.
4650 
4651 INPUT PARAMETERS:
4652  State - structure stores algorithm state
4653 
4654  -- ALGLIB --
4655  Copyright 26.09.2014 by Bochkanov Sergey
4656 *************************************************************************/
4658 
4659 
4660 /*************************************************************************
4661 This function sets preconditioner to "exact low rank" mode.
4662 
4663 Preconditioning is very important for convergence of Augmented Lagrangian
4664 algorithm because presence of penalty term makes problem ill-conditioned.
4665 Difference between performance of preconditioned and unpreconditioned
4666 methods can be as large as 100x!
4667 
4668 MinNLC optimizer may use following preconditioners, each with its own
4669 benefits and drawbacks:
4670  a) inexact LBFGS-based, with O(N*K) evaluation time
4671  b) exact low rank one, with O(N*K^2) evaluation time
4672  c) exact robust one, with O(N^3+K*N^2) evaluation time
4673 where K is a total number of general linear and nonlinear constraints (box
4674 ones are not counted).
4675 
4676 It also provides special unpreconditioned mode of operation which can be
4677 used for test purposes. Comments below discuss low rank preconditioner.
4678 
4679 Exact low-rank preconditioner uses Woodbury matrix identity to build
4680 quadratic model of the penalized function. It has following features:
4681 * no special assumptions about orthogonality of constraints
4682 * preconditioner evaluation is optimized for K<<N. Its cost is O(N*K^2),
4683  so it may become prohibitively slow for K>=N.
4684 * finally, stability of the process is guaranteed only for K<<N. Woodbury
4685  update often fail for K>=N due to degeneracy of intermediate matrices.
4686  That's why we recommend to use "exact robust" preconditioner for such
4687  cases.
4688 
4689 RECOMMENDATIONS
4690 
4691 We recommend to choose between "exact low rank" and "exact robust"
4692 preconditioners, with "low rank" version being chosen when you know in
4693 advance that total count of non-box constraints won't exceed N, and "robust"
4694 version being chosen when you need bulletproof solution.
4695 
4696 INPUT PARAMETERS:
4697  State - structure stores algorithm state
4698  UpdateFreq- update frequency. Preconditioner is rebuilt after every
4699  UpdateFreq iterations. Recommended value: 10 or higher.
4700  Zero value means that good default value will be used.
4701 
4702  -- ALGLIB --
4703  Copyright 26.09.2014 by Bochkanov Sergey
4704 *************************************************************************/
4705 void minnlcsetprecexactlowrank(const minnlcstate &state, const ae_int_t updatefreq);
4706 
4707 
4708 /*************************************************************************
4709 This function sets preconditioner to "exact robust" mode.
4710 
4711 Preconditioning is very important for convergence of Augmented Lagrangian
4712 algorithm because presence of penalty term makes problem ill-conditioned.
4713 Difference between performance of preconditioned and unpreconditioned
4714 methods can be as large as 100x!
4715 
4716 MinNLC optimizer may use following preconditioners, each with its own
4717 benefits and drawbacks:
4718  a) inexact LBFGS-based, with O(N*K) evaluation time
4719  b) exact low rank one, with O(N*K^2) evaluation time
4720  c) exact robust one, with O(N^3+K*N^2) evaluation time
4721 where K is a total number of general linear and nonlinear constraints (box
4722 ones are not counted).
4723 
4724 It also provides special unpreconditioned mode of operation which can be
4725 used for test purposes. Comments below discuss robust preconditioner.
4726 
4727 Exact robust preconditioner uses Cholesky decomposition to invert
4728 approximate Hessian matrix H=D+W'*C*W (where D stands for diagonal terms
4729 of Hessian, combined result of initial scaling matrix and penalty from box
4730 constraints; W stands for general linear constraints and linearization of
4731 nonlinear ones; C stands for diagonal matrix of penalty coefficients).
4732 
4733 This preconditioner has following features:
4734 * no special assumptions about constraint structure
4735 * preconditioner is optimized for stability; unlike "exact low rank"
4736  version which fails for K>=N, this one works well for any value of K.
4737 * the only drawback is that it takes O(N^3+K*N^2) time to build it. No
4738  economical Woodbury update is applied even when it makes sense, thus
4739  there exist situations (K<<N) when "exact low rank" preconditioner
4740  outperforms this one.
4741 
4742 RECOMMENDATIONS
4743 
4744 We recommend to choose between "exact low rank" and "exact robust"
4745 preconditioners, with "low rank" version being chosen when you know in
4746 advance that total count of non-box constraints won't exceed N, and "robust"
4747 version being chosen when you need bulletproof solution.
4748 
4749 INPUT PARAMETERS:
4750  State - structure stores algorithm state
4751  UpdateFreq- update frequency. Preconditioner is rebuilt after every
4752  UpdateFreq iterations. Recommended value: 10 or higher.
4753  Zero value means that good default value will be used.
4754 
4755  -- ALGLIB --
4756  Copyright 26.09.2014 by Bochkanov Sergey
4757 *************************************************************************/
4758 void minnlcsetprecexactrobust(const minnlcstate &state, const ae_int_t updatefreq);
4759 
4760 
4761 /*************************************************************************
4762 This function sets preconditioner to "turned off" mode.
4763 
4764 Preconditioning is very important for convergence of Augmented Lagrangian
4765 algorithm because presence of penalty term makes problem ill-conditioned.
4766 Difference between performance of preconditioned and unpreconditioned
4767 methods can be as large as 100x!
4768 
4769 MinNLC optimizer may utilize two preconditioners, each with its own
4770 benefits and drawbacks: a) inexact LBFGS-based, and b) exact low rank one.
4771 It also provides special unpreconditioned mode of operation which can be
4772 used for test purposes.
4773 
4774 This function activates this test mode. Do not use it in production code
4775 to solve real-life problems.
4776 
4777 INPUT PARAMETERS:
4778  State - structure stores algorithm state
4779 
4780  -- ALGLIB --
4781  Copyright 26.09.2014 by Bochkanov Sergey
4782 *************************************************************************/
4783 void minnlcsetprecnone(const minnlcstate &state);
4784 
4785 
4786 /*************************************************************************
4787 This function sets maximum step length (after scaling of step vector with
4788 respect to variable scales specified by minnlcsetscale() call).
4789 
4790 INPUT PARAMETERS:
4791  State - structure which stores algorithm state
4792  StpMax - maximum step length, >=0. Set StpMax to 0.0 (default), if
4793  you don't want to limit step length.
4794 
4795 Use this subroutine when you optimize target function which contains exp()
4796 or other fast growing functions, and optimization algorithm makes too
4797 large steps which leads to overflow. This function allows us to reject
4798 steps that are too large (and therefore expose us to the possible
4799 overflow) without actually calculating function value at the x+stp*d.
4800 
4801  -- ALGLIB --
4802  Copyright 02.04.2010 by Bochkanov Sergey
4803 *************************************************************************/
4804 void minnlcsetstpmax(const minnlcstate &state, const double stpmax);
4805 
4806 
4807 /*************************************************************************
4808 This function tells MinNLC unit to use Augmented Lagrangian algorithm
4809 for nonlinearly constrained optimization. This algorithm is a slight
4810 modification of one described in "A Modified Barrier-Augmented Lagrangian
4811 Method for Constrained Minimization (1999)" by D.GOLDFARB, R.POLYAK,
4812 K. SCHEINBERG, I.YUZEFOVICH.
4813 
4814 Augmented Lagrangian algorithm works by converting problem of minimizing
4815 F(x) subject to equality/inequality constraints to unconstrained problem
4816 of the form
4817 
4818  min[ f(x) +
4819  + Rho*PENALTY_EQ(x) + SHIFT_EQ(x,Nu1) +
4820  + Rho*PENALTY_INEQ(x) + SHIFT_INEQ(x,Nu2) ]
4821 
4822 where:
4823 * Rho is a fixed penalization coefficient
4824 * PENALTY_EQ(x) is a penalty term, which is used to APPROXIMATELY enforce
4825  equality constraints
4826 * SHIFT_EQ(x) is a special "shift" term which is used to "fine-tune"
4827  equality constraints, greatly increasing precision
4828 * PENALTY_INEQ(x) is a penalty term which is used to approximately enforce
4829  inequality constraints
4830 * SHIFT_INEQ(x) is a special "shift" term which is used to "fine-tune"
4831  inequality constraints, greatly increasing precision
4832 * Nu1/Nu2 are vectors of Lagrange coefficients which are fine-tuned during
4833  outer iterations of algorithm
4834 
4835 This version of AUL algorithm uses preconditioner, which greatly
4836 accelerates convergence. Because this algorithm is similar to penalty
4837 methods, it may perform steps into infeasible area. All kinds of
4838 constraints (boundary, linear and nonlinear ones) may be violated in
4839 intermediate points - and in the solution. However, properly configured
4840 AUL method is significantly better at handling constraints than barrier
4841 and/or penalty methods.
4842 
4843 The very basic outline of algorithm is given below:
4844 1) first outer iteration is performed with "default" values of Lagrange
4845  multipliers Nu1/Nu2. Solution quality is low (candidate point can be
4846  too far away from true solution; large violation of constraints is
4847  possible) and is comparable with that of penalty methods.
4848 2) subsequent outer iterations refine Lagrange multipliers and improve
4849  quality of the solution.
4850 
4851 INPUT PARAMETERS:
4852  State - structure which stores algorithm state
4853  Rho - penalty coefficient, Rho>0:
4854  * large enough that algorithm converges with desired
4855  precision. Minimum value is 10*max(S'*diag(H)*S), where
4856  S is a scale matrix (set by MinNLCSetScale) and H is a
4857  Hessian of the function being minimized. If you can not
4858  easily estimate Hessian norm, see our recommendations
4859  below.
4860  * not TOO large to prevent ill-conditioning
4861  * for unit-scale problems (variables and Hessian have unit
4862  magnitude), Rho=100 or Rho=1000 can be used.
4863  * it is important to note that Rho is internally multiplied
4864  by scaling matrix, i.e. optimum value of Rho depends on
4865  scale of variables specified by MinNLCSetScale().
4866  ItsCnt - number of outer iterations:
4867  * ItsCnt=0 means that small number of outer iterations is
4868  automatically chosen (10 iterations in current version).
4869  * ItsCnt=1 means that AUL algorithm performs just as usual
4870  barrier method.
4871  * ItsCnt>1 means that AUL algorithm performs specified
4872  number of outer iterations
4873 
4874 HOW TO CHOOSE PARAMETERS
4875 
4876 Nonlinear optimization is a tricky area and Augmented Lagrangian algorithm
4877 is sometimes hard to tune. Good values of Rho and ItsCnt are problem-
4878 specific. In order to help you we prepared following set of
4879 recommendations:
4880 
4881 * for unit-scale problems (variables and Hessian have unit magnitude),
4882  Rho=100 or Rho=1000 can be used.
4883 
4884 * start from some small value of Rho and solve problem with just one
4885  outer iteration (ItsCnt=1). In this case algorithm behaves like penalty
4886  method. Increase Rho in 2x or 10x steps until you see that one outer
4887  iteration returns point which is "rough approximation to solution".
4888 
4889  It is very important to have Rho so large that penalty term becomes
4890  constraining i.e. modified function becomes highly convex in constrained
4891  directions.
4892 
4893  From the other side, too large Rho may prevent you from converging to
4894  the solution. You can diagnose it by studying number of inner iterations
4895  performed by algorithm: too few (5-10 on 1000-dimensional problem) or
4896  too many (orders of magnitude more than dimensionality) usually means
4897  that Rho is too large.
4898 
4899 * with just one outer iteration you usually have low-quality solution.
4900  Some constraints can be violated with very large margin, while other
4901  ones (which are NOT violated in the true solution) can push final point
4902  too far in the inner area of the feasible set.
4903 
4904  For example, if you have constraint x0>=0 and true solution x0=1, then
4905  merely a presence of "x0>=0" will introduce a bias towards larger values
4906  of x0. Say, algorithm may stop at x0=1.5 instead of 1.0.
4907 
4908 * after you found good Rho, you may increase number of outer iterations.
4909  ItsCnt=10 is a good value. Subsequent outer iteration will refine values
4910  of Lagrange multipliers. Constraints which were violated will be
4911  enforced, inactive constraints will be dropped (corresponding multipliers
4912  will be decreased). Ideally, you should see 10-1000x improvement in
4913  constraint handling (constraint violation is reduced).
4914 
4915 * if you see that algorithm converges to vicinity of solution, but
4916  additional outer iterations do not refine solution, it may mean that
4917  algorithm is unstable - it wanders around true solution, but can not
4918  approach it. Sometimes algorithm may be stabilized by increasing Rho one
4919  more time, making it 5x or 10x larger.
4920 
4921 SCALING OF CONSTRAINTS [IMPORTANT]
4922 
4923 AUL optimizer scales variables according to scale specified by
4924 MinNLCSetScale() function, so it can handle problems with badly scaled
4925 variables (as long as we KNOW their scales). However, because function
4926 being optimized is a mix of original function and constraint-dependent
4927 penalty functions, it is important to rescale both variables AND
4928 constraints.
4929 
4930 Say, if you minimize f(x)=x^2 subject to 1000000*x>=0, then you have
4931 constraint whose scale is different from that of target function (another
4932 example is 0.000001*x>=0). It is also possible to have constraints whose
4933 scales are misaligned: 1000000*x0>=0, 0.000001*x1<=0. Inappropriate
4934 scaling may ruin convergence because minimizing x^2 subject to x>=0 is NOT
4935 same as minimizing it subject to 1000000*x>=0.
4936 
4937 Because we know coefficients of boundary/linear constraints, we can
4938 automatically rescale and normalize them. However, there is no way to
4939 automatically rescale nonlinear constraints Gi(x) and Hi(x) - they are
4940 black boxes.
4941 
4942 It means that YOU are the one who is responsible for correct scaling of
4943 nonlinear constraints Gi(x) and Hi(x). We recommend you to rescale
4944 nonlinear constraints in such way that I-th component of dG/dX (or dH/dx)
4945 has magnitude approximately equal to 1/S[i] (where S is a scale set by
4946 MinNLCSetScale() function).
4947 
4948 WHAT IF IT DOES NOT CONVERGE?
4949 
4950 It is possible that AUL algorithm fails to converge to precise values of
4951 Lagrange multipliers. It stops somewhere around true solution, but candidate
4952 point is still too far from solution, and some constraints are violated.
4953 Such kind of failure is specific for Lagrangian algorithms - technically,
4954 they stop at some point, but this point is not constrained solution.
4955 
4956 There exist several reasons why algorithm may fail to converge:
4957 a) too loose stopping criteria for inner iteration
4958 b) degenerate, redundant constraints
4959 c) target function has unconstrained extremum exactly at the boundary of
4960  some constraint
4961 d) numerical noise in the target function
4962 
4963 In all these cases algorithm is unstable - each outer iteration results in
4964 large and almost random step which improves handling of some constraints,
4965 but violates other ones (ideally outer iterations should form a sequence
4966 of progressively decreasing steps towards solution).
4967 
4968 First reason possible is that too loose stopping criteria for inner
4969 iteration were specified. Augmented Lagrangian algorithm solves a sequence
4970 of intermediate problems, and requires each of them to be solved with high
4971 precision. Insufficient precision results in incorrect update of Lagrange
4972 multipliers.
4973 
4974 Another reason is that you may have specified degenerate constraints: say,
4975 some constraint was repeated twice. In most cases AUL algorithm gracefully
4976 handles such situations, but sometimes it may spend too much time figuring
4977 out subtle degeneracies in constraint matrix.
4978 
4979 Third reason is tricky and hard to diagnose. Consider situation when you
4980 minimize f=x^2 subject to constraint x>=0. Unconstrained extremum is
4981 located exactly at the boundary of constrained area. In this case
4982 algorithm will tend to oscillate between negative and positive x. Each
4983 time it stops at x<0 it "reinforces" constraint x>=0, and each time it is
4984 bounced to x>0 it "relaxes" constraint (and is attracted to x<0).
4985 
4986 Such situation sometimes happens in problems with hidden symmetries.
4987 Algorithm gets caught in a loop with Lagrange multipliers being
4988 continuously increased/decreased. Luckily, such loop forms after at least
4989 three iterations, so this problem can be solved by DECREASING number of
4990 outer iterations down to 1-2 and increasing penalty coefficient Rho as
4991 much as possible.
4992 
4993 Final reason is numerical noise. AUL algorithm is robust against moderate
4994 noise (more robust than, say, active set methods), but large noise may
4995 destabilize algorithm.
4996 
4997  -- ALGLIB --
4998  Copyright 06.06.2014 by Bochkanov Sergey
4999 *************************************************************************/
5000 void minnlcsetalgoaul(const minnlcstate &state, const double rho, const ae_int_t itscnt);
5001 
5002 
5003 /*************************************************************************
5004 This function turns on/off reporting.
5005 
5006 INPUT PARAMETERS:
5007  State - structure which stores algorithm state
5008  NeedXRep- whether iteration reports are needed or not
5009 
5010 If NeedXRep is True, algorithm will call rep() callback function if it is
5011 provided to MinNLCOptimize().
5012 
5013 NOTE: algorithm passes two parameters to rep() callback - current point
5014  and penalized function value at current point. Important - function
5015  value which is returned is NOT function being minimized. It is sum
5016  of the value of the function being minimized - and penalty term.
5017 
5018  -- ALGLIB --
5019  Copyright 28.11.2010 by Bochkanov Sergey
5020 *************************************************************************/
5021 void minnlcsetxrep(const minnlcstate &state, const bool needxrep);
5022 
5023 
5024 /*************************************************************************
5025 This function provides reverse communication interface
5026 Reverse communication interface is not documented or recommended to use.
5027 See below for functions which provide better documented API
5028 *************************************************************************/
5029 bool minnlciteration(const minnlcstate &state);
5030 
5031 
5032 /*************************************************************************
5033 This family of functions is used to launch iterations of nonlinear optimizer
5034 
5035 These functions accept following parameters:
5036  state - algorithm state
5037  fvec - callback which calculates function vector fi[]
5038  at given point x
5039  jac - callback which calculates function vector fi[]
5040  and Jacobian jac at given point x
5041  rep - optional callback which is called after each iteration
5042  can be NULL
5043  ptr - optional pointer which is passed to func/grad/hess/jac/rep
5044  can be NULL
5045 
5046 
5047 NOTES:
5048 
5049 1. This function has two different implementations: one which uses exact
5050  (analytical) user-supplied Jacobian, and one which uses only function
5051  vector and numerically differentiates function in order to obtain
5052  gradient.
5053 
5054  Depending on the specific function used to create optimizer object
5055  you should choose appropriate variant of MinNLCOptimize() - one which
5056  accepts function AND Jacobian or one which accepts ONLY function.
5057 
5058  Be careful to choose variant of MinNLCOptimize() which corresponds to
5059  your optimization scheme! Table below lists different combinations of
5060  callback (function/gradient) passed to MinNLCOptimize() and specific
5061  function used to create optimizer.
5062 
5063 
5064  | USER PASSED TO MinNLCOptimize()
5065  CREATED WITH | function only | function and gradient
5066  ------------------------------------------------------------
5067  MinNLCCreateF() | works FAILS
5068  MinNLCCreate() | FAILS works
5069 
5070  Here "FAILS" denotes inappropriate combinations of optimizer creation
5071  function and MinNLCOptimize() version. Attempts to use such
5072  combination will lead to exception. Either you did not pass gradient
5073  when it WAS needed or you passed gradient when it was NOT needed.
5074 
5075  -- ALGLIB --
5076  Copyright 06.06.2014 by Bochkanov Sergey
5077 
5078 *************************************************************************/
5080  void (*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr),
5081  void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
5082  void *ptr = NULL);
5084  void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr),
5085  void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
5086  void *ptr = NULL);
5087 
5088 
5089 /*************************************************************************
5090 MinNLC results
5091 
5092 INPUT PARAMETERS:
5093  State - algorithm state
5094 
5095 OUTPUT PARAMETERS:
5096  X - array[0..N-1], solution
5097  Rep - optimization report. You should check Rep.TerminationType
5098  in order to distinguish successful termination from
5099  unsuccessful one:
5100  * -8 internal integrity control detected infinite or
5101  NAN values in function/gradient. Abnormal
5102  termination signalled.
5103  * -7 gradient verification failed.
5104  See MinNLCSetGradientCheck() for more information.
5105  * 1 relative function improvement is no more than EpsF.
5106  * 2 scaled step is no more than EpsX.
5107  * 4 scaled gradient norm is no more than EpsG.
5108  * 5 MaxIts steps were taken
5109  More information about fields of this structure can be
5110  found in the comments on MinNLCReport datatype.
5111 
5112  -- ALGLIB --
5113  Copyright 06.06.2014 by Bochkanov Sergey
5114 *************************************************************************/
5116 
5117 
5118 /*************************************************************************
5119 NLC results
5120 
5121 Buffered implementation of MinNLCResults() which uses pre-allocated buffer
5122 to store X[]. If buffer size is too small, it resizes buffer. It is
5123 intended to be used in the inner cycles of performance critical algorithms
5124 where array reallocation penalty is too large to be ignored.
5125 
5126  -- ALGLIB --
5127  Copyright 28.11.2010 by Bochkanov Sergey
5128 *************************************************************************/
5130 
5131 
5132 /*************************************************************************
5133 This subroutine restarts algorithm from new point.
5134 All optimization parameters (including constraints) are left unchanged.
5135 
5136 This function allows to solve multiple optimization problems (which
5137 must have same number of dimensions) without object reallocation penalty.
5138 
5139 INPUT PARAMETERS:
5140  State - structure previously allocated with MinNLCCreate call.
5141  X - new starting point.
5142 
5143  -- ALGLIB --
5144  Copyright 28.11.2010 by Bochkanov Sergey
5145 *************************************************************************/
5146 void minnlcrestartfrom(const minnlcstate &state, const real_1d_array &x);
5147 
5148 
5149 /*************************************************************************
5150 This subroutine turns on verification of the user-supplied analytic
5151 gradient:
5152 * user calls this subroutine before optimization begins
5153 * MinNLCOptimize() is called
5154 * prior to actual optimization, for each component of parameters being
5155  optimized X[i] algorithm performs following steps:
5156  * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i],
5157  where X[i] is i-th component of the initial point and S[i] is a scale
5158  of i-th parameter
5159  * F(X) is evaluated at these trial points
5160  * we perform one more evaluation in the middle point of the interval
5161  * we build cubic model using function values and derivatives at trial
5162  points and we compare its prediction with actual value in the middle
5163  point
5164  * in case difference between prediction and actual value is higher than
5165  some predetermined threshold, algorithm stops with completion code -7;
5166  Rep.VarIdx is set to index of the parameter with incorrect derivative,
5167  and Rep.FuncIdx is set to index of the function.
5168 * after verification is over, algorithm proceeds to the actual optimization.
5169 
5170 NOTE 1: verification needs N (parameters count) gradient evaluations. It
5171  is very costly and you should use it only for low dimensional
5172  problems, when you want to be sure that you've correctly
5173  calculated analytic derivatives. You should not use it in the
5174  production code (unless you want to check derivatives provided by
5175  some third party).
5176 
5177 NOTE 2: you should carefully choose TestStep. Value which is too large
5178  (so large that function behaviour is significantly non-cubic) will
5179  lead to false alarms. You may use different step for different
5180  parameters by means of setting scale with MinNLCSetScale().
5181 
5182 NOTE 3: this function may lead to false positives. In case it reports that
5183  I-th derivative was calculated incorrectly, you may decrease test
5184  step and try one more time - maybe your function changes too
5185  sharply and your step is too large for such rapidly changing
5186  function.
5187 
5188 INPUT PARAMETERS:
5189  State - structure used to store algorithm state
5190  TestStep - verification step:
5191  * TestStep=0 turns verification off
5192  * TestStep>0 activates verification
5193 
5194  -- ALGLIB --
5195  Copyright 15.06.2014 by Bochkanov Sergey
5196 *************************************************************************/
5197 void minnlcsetgradientcheck(const minnlcstate &state, const double teststep);
5198 
5199 /*************************************************************************
5200  BOX CONSTRAINED OPTIMIZATION
5201  WITH FAST ACTIVATION OF MULTIPLE BOX CONSTRAINTS
5202 
5203 DESCRIPTION:
5204 The subroutine minimizes function F(x) of N arguments subject to box
5205 constraints (with some of box constraints actually being equality ones).
5206 
5207 This optimizer uses algorithm similar to that of MinBLEIC (optimizer with
5208 general linear constraints), but presence of box-only constraints allows
5209 us to use faster constraint activation strategies. On large-scale problems,
5210 with multiple constraints active at the solution, this optimizer can be
5211 several times faster than BLEIC.
5212 
5213 REQUIREMENTS:
5214 * user must provide function value and gradient
5215 * starting point X0 must be feasible or
5216  not too far away from the feasible set
5217 * grad(f) must be Lipschitz continuous on a level set:
5218  L = { x : f(x)<=f(x0) }
5219 * function must be defined everywhere on the feasible set F
5220 
5221 USAGE:
5222 
5223 Constrained optimization is far more complex than the unconstrained one.
5224 Here we give very brief outline of the BC optimizer. We strongly recommend
5225 you to read examples in the ALGLIB Reference Manual and to read ALGLIB User Guide
5226 on optimization, which is available at http://www.alglib.net/optimization/
5227 
5228 1. User initializes algorithm state with MinBCCreate() call
5229 
5230 2. User adds box constraints by calling MinBCSetBC() function.
5231 
5232 3. User sets stopping conditions with MinBCSetCond().
5233 
5234 4. User calls MinBCOptimize() function which takes algorithm state and
5235  pointer (delegate, etc.) to callback function which calculates F/G.
5236 
5237 5. User calls MinBCResults() to get solution
5238 
5239 6. Optionally user may call MinBCRestartFrom() to solve another problem
5240  with same N but another starting point.
5241  MinBCRestartFrom() allows to reuse already initialized structure.
5242 
5243 
5244 INPUT PARAMETERS:
5245  N - problem dimension, N>0:
5246  * if given, only leading N elements of X are used
5247  * if not given, automatically determined from size of X
5248  X - starting point, array[N]:
5249  * it is better to set X to a feasible point
5250  * but X can be infeasible, in which case algorithm will try
5251  to find feasible point first, using X as initial
5252  approximation.
5253 
5254 OUTPUT PARAMETERS:
5255  State - structure stores algorithm state
5256 
5257  -- ALGLIB --
5258  Copyright 28.11.2010 by Bochkanov Sergey
5259 *************************************************************************/
5260 void minbccreate(const ae_int_t n, const real_1d_array &x, minbcstate &state);
5261 void minbccreate(const real_1d_array &x, minbcstate &state);
5262 
5263 
5264 /*************************************************************************
5265 The subroutine is finite difference variant of MinBCCreate(). It uses
5266 finite differences in order to differentiate target function.
5267 
5268 Description below contains information which is specific to this function
5269 only. We recommend to read comments on MinBCCreate() in order to get
5270 more information about creation of BC optimizer.
5271 
5272 INPUT PARAMETERS:
5273  N - problem dimension, N>0:
5274  * if given, only leading N elements of X are used
5275  * if not given, automatically determined from size of X
5276  X - starting point, array[0..N-1].
5277  DiffStep- differentiation step, >0
5278 
5279 OUTPUT PARAMETERS:
5280  State - structure which stores algorithm state
5281 
5282 NOTES:
5283 1. algorithm uses 4-point central formula for differentiation.
5284 2. differentiation step along I-th axis is equal to DiffStep*S[I] where
5285  S[] is scaling vector which can be set by MinBCSetScale() call.
5286 3. we recommend you to use moderate values of differentiation step. Too
5287  large step will result in too large truncation errors, while too small
5288  step will result in too large numerical errors. 1.0E-6 can be good
5289  value to start with.
5290 4. Numerical differentiation is very inefficient - one gradient
5291  calculation needs 4*N function evaluations. This function will work for
5292  any N - either small (1...10), moderate (10...100) or large (100...).
5293  However, performance penalty will be too severe for any N's except for
5294  small ones.
5295  We should also say that code which relies on numerical differentiation
5296  is less robust and precise. CG needs exact gradient values. Imprecise
5297  gradient may slow down convergence, especially on highly nonlinear
5298  problems.
5299  Thus we recommend to use this function for fast prototyping on small-
5300  dimensional problems only, and to implement analytical gradient as soon
5301  as possible.
5302 
5303  -- ALGLIB --
5304  Copyright 16.05.2011 by Bochkanov Sergey
5305 *************************************************************************/
5306 void minbccreatef(const ae_int_t n, const real_1d_array &x, const double diffstep, minbcstate &state);
5307 void minbccreatef(const real_1d_array &x, const double diffstep, minbcstate &state);
5308 
5309 
5310 /*************************************************************************
5311 This function sets boundary constraints for BC optimizer.
5312 
5313 Boundary constraints are inactive by default (after initial creation).
5314 They are preserved after algorithm restart with MinBCRestartFrom().
5315 
5316 INPUT PARAMETERS:
5317  State - structure stores algorithm state
5318  BndL - lower bounds, array[N].
5319  If some (all) variables are unbounded, you may specify
5320  very small number or -INF.
5321  BndU - upper bounds, array[N].
5322  If some (all) variables are unbounded, you may specify
5323  very large number or +INF.
5324 
5325 NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th
5326 variable will be "frozen" at X[i]=BndL[i]=BndU[i].
5327 
5328 NOTE 2: this solver has following useful properties:
5329 * bound constraints are always satisfied exactly
5330 * function is evaluated only INSIDE area specified by bound constraints,
5331  even when numerical differentiation is used (algorithm adjusts nodes
5332  according to boundary constraints)
5333 
5334  -- ALGLIB --
5335  Copyright 28.11.2010 by Bochkanov Sergey
5336 *************************************************************************/
5337 void minbcsetbc(const minbcstate &state, const real_1d_array &bndl, const real_1d_array &bndu);
5338 
5339 
5340 /*************************************************************************
5341 This function sets stopping conditions for the optimizer.
5342 
5343 INPUT PARAMETERS:
5344  State - structure which stores algorithm state
5345  EpsG - >=0
5346  The subroutine finishes its work if the condition
5347  |v|<EpsG is satisfied, where:
5348  * |.| means Euclidean norm
5349  * v - scaled gradient vector, v[i]=g[i]*s[i]
5350  * g - gradient
5351  * s - scaling coefficients set by MinBCSetScale()
5352  EpsF - >=0
5353  The subroutine finishes its work if on k+1-th iteration
5354  the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
5355  is satisfied.
5356  EpsX - >=0
5357  The subroutine finishes its work if on k+1-th iteration
5358  the condition |v|<=EpsX is fulfilled, where:
5359  * |.| means Euclidean norm
5360  * v - scaled step vector, v[i]=dx[i]/s[i]
5361  * dx - step vector, dx=X(k+1)-X(k)
5362  * s - scaling coefficients set by MinBCSetScale()
5363  MaxIts - maximum number of iterations. If MaxIts=0, the number of
5364  iterations is unlimited.
5365 
5366 Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead
5367 to automatic stopping criterion selection.
5368 
5369 NOTE: when SetCond() called with non-zero MaxIts, BC solver may perform
5370  slightly more than MaxIts iterations. I.e., MaxIts sets non-strict
5371  limit on iterations count.
5372 
5373  -- ALGLIB --
5374  Copyright 28.11.2010 by Bochkanov Sergey
5375 *************************************************************************/
5376 void minbcsetcond(const minbcstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits);
5377 
5378 
5379 /*************************************************************************
5380 This function sets scaling coefficients for BC optimizer.
5381 
5382 ALGLIB optimizers use scaling matrices to test stopping conditions (step
5383 size and gradient are scaled before comparison with tolerances). Scale of
5384 the I-th variable is a translation invariant measure of:
5385 a) "how large" the variable is
5386 b) how large the step should be to make significant changes in the function
5387 
5388 Scaling is also used by finite difference variant of the optimizer - step
5389 along I-th axis is equal to DiffStep*S[I].
5390 
5391 In most optimizers (and in the BC too) scaling is NOT a form of
5392 preconditioning. It just affects stopping conditions. You should set
5393 preconditioner by separate call to one of the MinBCSetPrec...()
5394 functions.
5395 
5396 There is a special preconditioning mode, however, which uses scaling
5397 coefficients to form diagonal preconditioning matrix. You can turn this
5398 mode on, if you want. But you should understand that scaling is not the
5399 same thing as preconditioning - these are two different, although related
5400 forms of tuning solver.
5401 
5402 INPUT PARAMETERS:
5403  State - structure stores algorithm state
5404  S - array[N], non-zero scaling coefficients
5405  S[i] may be negative, sign doesn't matter.
5406 
5407  -- ALGLIB --
5408  Copyright 14.01.2011 by Bochkanov Sergey
5409 *************************************************************************/
5410 void minbcsetscale(const minbcstate &state, const real_1d_array &s);
5411 
5412 
5413 /*************************************************************************
5414 Modification of the preconditioner: preconditioning is turned off.
5415 
5416 INPUT PARAMETERS:
5417  State - structure which stores algorithm state
5418 
5419  -- ALGLIB --
5420  Copyright 13.10.2010 by Bochkanov Sergey
5421 *************************************************************************/
5422 void minbcsetprecdefault(const minbcstate &state);
5423 
5424 
5425 /*************************************************************************
5426 Modification of the preconditioner: diagonal of approximate Hessian is
5427 used.
5428 
5429 INPUT PARAMETERS:
5430  State - structure which stores algorithm state
5431  D - diagonal of the approximate Hessian, array[0..N-1],
5432  (if larger, only leading N elements are used).
5433 
5434 NOTE 1: D[i] should be positive. Exception will be thrown otherwise.
5435 
5436 NOTE 2: you should pass diagonal of approximate Hessian - NOT ITS INVERSE.
5437 
5438  -- ALGLIB --
5439  Copyright 13.10.2010 by Bochkanov Sergey
5440 *************************************************************************/
5441 void minbcsetprecdiag(const minbcstate &state, const real_1d_array &d);
5442 
5443 
5444 /*************************************************************************
5445 Modification of the preconditioner: scale-based diagonal preconditioning.
5446 
5447 This preconditioning mode can be useful when you don't have approximate
5448 diagonal of Hessian, but you know that your variables are badly scaled
5449 (for example, one variable is in [1,10], and another in [1000,100000]),
5450 and most part of the ill-conditioning comes from different scales of vars.
5451 
5452 In this case simple scale-based preconditioner, with H[i] = 1/(s[i]^2),
5453 can greatly improve convergence.
5454 
5455 IMPORTANT: you should set scale of your variables with MinBCSetScale()
5456 call (before or after MinBCSetPrecScale() call). Without knowledge of
5457 the scale of your variables scale-based preconditioner will be just unit
5458 matrix.
5459 
5460 INPUT PARAMETERS:
5461  State - structure which stores algorithm state
5462 
5463  -- ALGLIB --
5464  Copyright 13.10.2010 by Bochkanov Sergey
5465 *************************************************************************/
5466 void minbcsetprecscale(const minbcstate &state);
5467 
5468 
5469 /*************************************************************************
5470 This function turns on/off reporting.
5471 
5472 INPUT PARAMETERS:
5473  State - structure which stores algorithm state
5474  NeedXRep- whether iteration reports are needed or not
5475 
5476 If NeedXRep is True, algorithm will call rep() callback function if it is
5477 provided to MinBCOptimize().
5478 
5479  -- ALGLIB --
5480  Copyright 28.11.2010 by Bochkanov Sergey
5481 *************************************************************************/
5482 void minbcsetxrep(const minbcstate &state, const bool needxrep);
5483 
5484 
5485 /*************************************************************************
5486 This function sets maximum step length
5487 
5488 INPUT PARAMETERS:
5489  State - structure which stores algorithm state
5490  StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't
5491  want to limit step length.
5492 
5493 Use this subroutine when you optimize target function which contains exp()
5494 or other fast growing functions, and optimization algorithm makes too
5495 large steps which lead to overflow. This function allows us to reject
5496 steps that are too large (and therefore expose us to the possible
5497 overflow) without actually calculating function value at the x+stp*d.
5498 
5499  -- ALGLIB --
5500  Copyright 02.04.2010 by Bochkanov Sergey
5501 *************************************************************************/
5502 void minbcsetstpmax(const minbcstate &state, const double stpmax);
5503 
5504 
5505 /*************************************************************************
5506 This function provides reverse communication interface
5507 Reverse communication interface is not documented or recommended to use.
5508 See below for functions which provide better documented API
5509 *************************************************************************/
5510 bool minbciteration(const minbcstate &state);
5511 
5512 
5513 /*************************************************************************
5514 This family of functions is used to launch iterations of nonlinear optimizer
5515 
5516 These functions accept following parameters:
5517  state - algorithm state
5518  func - callback which calculates function (or merit function)
5519  value func at given point x
5520  grad - callback which calculates function (or merit function)
5521  value func and gradient grad at given point x
5522  rep - optional callback which is called after each iteration
5523  can be NULL
5524  ptr - optional pointer which is passed to func/grad/hess/jac/rep
5525  can be NULL
5526 
5527 NOTES:
5528 
5529 1. This function has two different implementations: one which uses exact
5530  (analytical) user-supplied gradient, and one which uses function value
5531  only and numerically differentiates function in order to obtain
5532  gradient.
5533 
5534  Depending on the specific function used to create optimizer object
5535  (either MinBCCreate() for analytical gradient or MinBCCreateF()
5536  for numerical differentiation) you should choose appropriate variant of
5537  MinBCOptimize() - one which accepts function AND gradient or one
5538  which accepts function ONLY.
5539 
5540  Be careful to choose variant of MinBCOptimize() which corresponds to
5541  your optimization scheme! Table below lists different combinations of
5542  callback (function/gradient) passed to MinBCOptimize() and specific
5543  function used to create optimizer.
5544 
5545 
5546  | USER PASSED TO MinBCOptimize()
5547  CREATED WITH | function only | function and gradient
5548  ------------------------------------------------------------
5549  MinBCCreateF() | works FAILS
5550  MinBCCreate() | FAILS works
5551 
5552  Here "FAIL" denotes inappropriate combinations of optimizer creation
5553  function and MinBCOptimize() version. Attempts to use such
5554  combination (for example, to create optimizer with MinBCCreateF()
5555  and to pass gradient information to MinBCOptimize()) will lead to
5556  exception being thrown. Either you did not pass gradient when it WAS
5557  needed or you passed gradient when it was NOT needed.
5558 
5559  -- ALGLIB --
5560  Copyright 28.11.2010 by Bochkanov Sergey
5561 
5562 *************************************************************************/
5563 void minbcoptimize(minbcstate &state,
5564  void (*func)(const real_1d_array &x, double &func, void *ptr),
5565  void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
5566  void *ptr = NULL);
5567 void minbcoptimize(minbcstate &state,
5568  void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
5569  void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
5570  void *ptr = NULL);
5571 
5572 
5573 /*************************************************************************
5574 BC results
5575 
5576 INPUT PARAMETERS:
5577  State - algorithm state
5578 
5579 OUTPUT PARAMETERS:
5580  X - array[0..N-1], solution
5581  Rep - optimization report. You should check Rep.TerminationType
5582  in order to distinguish successful termination from
5583  unsuccessful one:
5584  * -8 internal integrity control detected infinite or
5585  NAN values in function/gradient. Abnormal
5586  termination signalled.
5587  * -7 gradient verification failed.
5588  See MinBCSetGradientCheck() for more information.
5589  * -3 inconsistent constraints.
5590  * 1 relative function improvement is no more than EpsF.
5591  * 2 scaled step is no more than EpsX.
5592  * 4 scaled gradient norm is no more than EpsG.
5593  * 5 MaxIts steps were taken
5594  * 8 terminated by user who called minbcrequesttermination().
5595  X contains point which was "current accepted" when
5596  termination request was submitted.
5597  More information about fields of this structure can be
5598  found in the comments on MinBCReport datatype.
5599 
5600  -- ALGLIB --
5601  Copyright 28.11.2010 by Bochkanov Sergey
5602 *************************************************************************/
5603 void minbcresults(const minbcstate &state, real_1d_array &x, minbcreport &rep);
5604 
5605 
5606 /*************************************************************************
5607 BC results
5608 
5609 Buffered implementation of MinBCResults() which uses pre-allocated buffer
5610 to store X[]. If buffer size is too small, it resizes buffer. It is
5611 intended to be used in the inner cycles of performance critical algorithms
5612 where array reallocation penalty is too large to be ignored.
5613 
5614  -- ALGLIB --
5615  Copyright 28.11.2010 by Bochkanov Sergey
5616 *************************************************************************/
5617 void minbcresultsbuf(const minbcstate &state, real_1d_array &x, minbcreport &rep);
5618 
5619 
5620 /*************************************************************************
5621 This subroutine restarts algorithm from new point.
5622 All optimization parameters (including constraints) are left unchanged.
5623 
5624 This function allows to solve multiple optimization problems (which
5625 must have same number of dimensions) without object reallocation penalty.
5626 
5627 INPUT PARAMETERS:
5628  State - structure previously allocated with MinBCCreate call.
5629  X - new starting point.
5630 
5631  -- ALGLIB --
5632  Copyright 28.11.2010 by Bochkanov Sergey
5633 *************************************************************************/
5634 void minbcrestartfrom(const minbcstate &state, const real_1d_array &x);
5635 
5636 
5637 /*************************************************************************
5638 This subroutine submits request for termination of running optimizer. It
5639 should be called from user-supplied callback when user decides that it is
5640 time to "smoothly" terminate optimization process. As result, optimizer
5641 stops at point which was "current accepted" when termination request was
5642 submitted and returns error code 8 (successful termination).
5643 
5644 INPUT PARAMETERS:
5645  State - optimizer structure
5646 
5647 NOTE: after request for termination optimizer may perform several
5648  additional calls to user-supplied callbacks. It does NOT guarantee
5649  to stop immediately - it just guarantees that these additional calls
5650  will be discarded later.
5651 
5652 NOTE: calling this function on optimizer which is NOT running will have no
5653  effect.
5654 
5655 NOTE: multiple calls to this function are possible. First call is counted,
5656  subsequent calls are silently ignored.
5657 
5658  -- ALGLIB --
5659  Copyright 08.10.2014 by Bochkanov Sergey
5660 *************************************************************************/
5661 void minbcrequesttermination(const minbcstate &state);
5662 
5663 
5664 /*************************************************************************
5665 This subroutine turns on verification of the user-supplied analytic
5666 gradient:
5667 * user calls this subroutine before optimization begins
5668 * MinBCOptimize() is called
5669 * prior to actual optimization, for each component of parameters being
5670  optimized X[i] algorithm performs following steps:
5671  * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i],
5672  where X[i] is i-th component of the initial point and S[i] is a scale
5673  of i-th parameter
5674  * if needed, steps are bounded with respect to constraints on X[]
5675  * F(X) is evaluated at these trial points
5676  * we perform one more evaluation in the middle point of the interval
5677  * we build cubic model using function values and derivatives at trial
5678  points and we compare its prediction with actual value in the middle
5679  point
5680  * in case difference between prediction and actual value is higher than
5681  some predetermined threshold, algorithm stops with completion code -7;
5682  Rep.VarIdx is set to index of the parameter with incorrect derivative.
5683 * after verification is over, algorithm proceeds to the actual optimization.
5684 
5685 NOTE 1: verification needs N (parameters count) gradient evaluations. It
5686  is very costly and you should use it only for low dimensional
5687  problems, when you want to be sure that you've correctly
5688  calculated analytic derivatives. You should not use it in the
5689  production code (unless you want to check derivatives provided by
5690  some third party).
5691 
5692 NOTE 2: you should carefully choose TestStep. Value which is too large
5693  (so large that function behaviour is significantly non-cubic) will
5694  lead to false alarms. You may use different step for different
5695  parameters by means of setting scale with MinBCSetScale().
5696 
5697 NOTE 3: this function may lead to false positives. In case it reports that
5698  I-th derivative was calculated incorrectly, you may decrease test
5699  step and try one more time - maybe your function changes too
5700  sharply and your step is too large for such rapidly changing
5701  function.
5702 
5703 INPUT PARAMETERS:
5704  State - structure used to store algorithm state
5705  TestStep - verification step:
5706  * TestStep=0 turns verification off
5707  * TestStep>0 activates verification
5708 
5709  -- ALGLIB --
5710  Copyright 15.06.2012 by Bochkanov Sergey
5711 *************************************************************************/
5712 void minbcsetgradientcheck(const minbcstate &state, const double teststep);
5713 
5714 /*************************************************************************
5715  NONSMOOTH NONCONVEX OPTIMIZATION
5716  SUBJECT TO BOX/LINEAR/NONLINEAR-NONSMOOTH CONSTRAINTS
5717 
5718 DESCRIPTION:
5719 
5720 The subroutine minimizes function F(x) of N arguments subject to any
5721 combination of:
5722 * bound constraints
5723 * linear inequality constraints
5724 * linear equality constraints
5725 * nonlinear equality constraints Gi(x)=0
5726 * nonlinear inequality constraints Hi(x)<=0
5727 
5728 IMPORTANT: see MinNSSetAlgoAGS for important information on performance
5729  restrictions of AGS solver.
5730 
5731 REQUIREMENTS:
5732 * starting point X0 must be feasible or not too far away from the feasible
5733  set
5734 * F(), G(), H() are continuous, locally Lipschitz and continuously (but
5735  not necessarily twice) differentiable in an open dense subset of R^N.
5736  Functions F(), G() and H() may be nonsmooth and non-convex.
5737  Informally speaking, it means that functions are composed of large
5738  differentiable "patches" with nonsmoothness having place only at the
5739  boundaries between these "patches".
5740  Most real-life nonsmooth functions satisfy these requirements. Say,
5741  anything which involves finite number of abs(), min() and max() is very
5742  likely to pass the test.
5743  Say, it is possible to optimize anything of the following:
5744  * f=abs(x0)+2*abs(x1)
5745  * f=max(x0,x1)
5746  * f=sin(max(x0,x1)+abs(x2))
5747 * for nonlinearly constrained problems: F() must be bounded from below
5748  without nonlinear constraints (this requirement is due to the fact that,
5749  contrary to box and linear constraints, nonlinear ones require special
5750  handling).
5751 * user must provide function value and gradient for F(), H(), G() at all
5752  points where function/gradient can be calculated. If optimizer requires
5753  value exactly at the boundary between "patches" (say, at x=0 for f=abs(x)),
5754  where gradient is not defined, user may resolve tie arbitrarily (in our
5755  case - return +1 or -1 at its discretion).
5756 * NS solver supports numerical differentiation, i.e. it may differentiate
5757  your function for you, but it results in 2N increase of function
5758  evaluations. Not recommended unless you solve really small problems. See
5759  minnscreatef() for more information on this functionality.
5760 
5761 USAGE:
5762 
5763 1. User initializes algorithm state with MinNSCreate() call and chooses
5764  what NLC solver to use. There is some solver which is used by default,
5765  with default settings, but you should NOT rely on default choice. It
5766  may change in future releases of ALGLIB without notice, and no one can
5767  guarantee that new solver will be able to solve your problem with
5768  default settings.
5769 
5770  From the other side, if you choose solver explicitly, you can be pretty
5771  sure that it will work with new ALGLIB releases.
5772 
5773  In the current release following solvers can be used:
5774  * AGS solver (activated with MinNSSetAlgoAGS() function)
5775 
5776 2. User adds boundary and/or linear and/or nonlinear constraints by means
5777  of calling one of the following functions:
5778  a) MinNSSetBC() for boundary constraints
5779  b) MinNSSetLC() for linear constraints
5780  c) MinNSSetNLC() for nonlinear constraints
5781  You may combine (a), (b) and (c) in one optimization problem.
5782 
5783 3. User sets scale of the variables with MinNSSetScale() function. It is
5784  VERY important to set scale of the variables, because nonlinearly
5785  constrained problems are hard to solve when variables are badly scaled.
5786 
5787 4. User sets stopping conditions with MinNSSetCond().
5788 
5789 5. Finally, user calls MinNSOptimize() function which takes algorithm
5790  state and pointer (delegate, etc) to callback function which calculates
5791  F/G/H.
5792 
5793 7. User calls MinNSResults() to get solution
5794 
5795 8. Optionally user may call MinNSRestartFrom() to solve another problem
5796  with same N but another starting point. MinNSRestartFrom() allows to
5797  reuse already initialized structure.
5798 
5799 
5800 INPUT PARAMETERS:
5801  N - problem dimension, N>0:
5802  * if given, only leading N elements of X are used
5803  * if not given, automatically determined from size of X
5804  X - starting point, array[N]:
5805  * it is better to set X to a feasible point
5806  * but X can be infeasible, in which case algorithm will try
5807  to find feasible point first, using X as initial
5808  approximation.
5809 
5810 OUTPUT PARAMETERS:
5811  State - structure stores algorithm state
5812 
5813 NOTE: minnscreatef() function may be used if you do not have analytic
5814  gradient. This function creates solver which uses numerical
5815  differentiation with user-specified step.
5816 
5817  -- ALGLIB --
5818  Copyright 18.05.2015 by Bochkanov Sergey
5819 *************************************************************************/
5820 void minnscreate(const ae_int_t n, const real_1d_array &x, minnsstate &state);
5821 void minnscreate(const real_1d_array &x, minnsstate &state);
5822 
5823 
5824 /*************************************************************************
5825 Version of minnscreate() which uses numerical differentiation. I.e., you
5826 do not have to calculate derivatives yourself. However, this version needs
5827 2N times more function evaluations.
5828 
5829 2-point differentiation formula is used, because more precise 4-point
5830 formula is unstable when used on non-smooth functions.
5831 
5832 INPUT PARAMETERS:
5833  N - problem dimension, N>0:
5834  * if given, only leading N elements of X are used
5835  * if not given, automatically determined from size of X
5836  X - starting point, array[N]:
5837  * it is better to set X to a feasible point
5838  * but X can be infeasible, in which case algorithm will try
5839  to find feasible point first, using X as initial
5840  approximation.
5841  DiffStep- differentiation step, DiffStep>0. Algorithm performs
5842  numerical differentiation with step for I-th variable
5843  being equal to DiffStep*S[I] (here S[] is a scale vector,
5844  set by minnssetscale() function).
5845  Do not use too small steps, because it may lead to
5846  catastrophic cancellation during intermediate calculations.
5847 
5848 OUTPUT PARAMETERS:
5849  State - structure stores algorithm state
5850 
5851  -- ALGLIB --
5852  Copyright 18.05.2015 by Bochkanov Sergey
5853 *************************************************************************/
5854 void minnscreatef(const ae_int_t n, const real_1d_array &x, const double diffstep, minnsstate &state);
5855 void minnscreatef(const real_1d_array &x, const double diffstep, minnsstate &state);
5856 
5857 
5858 /*************************************************************************
5859 This function sets boundary constraints.
5860 
5861 Boundary constraints are inactive by default (after initial creation).
5862 They are preserved after algorithm restart with minnsrestartfrom().
5863 
5864 INPUT PARAMETERS:
5865  State - structure stores algorithm state
5866  BndL - lower bounds, array[N].
5867  If some (all) variables are unbounded, you may specify
5868  very small number or -INF.
5869  BndU - upper bounds, array[N].
5870  If some (all) variables are unbounded, you may specify
5871  very large number or +INF.
5872 
5873 NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th
5874 variable will be "frozen" at X[i]=BndL[i]=BndU[i].
5875 
5876 NOTE 2: AGS solver has following useful properties:
5877 * bound constraints are always satisfied exactly
5878 * function is evaluated only INSIDE area specified by bound constraints,
5879  even when numerical differentiation is used (algorithm adjusts nodes
5880  according to boundary constraints)
5881 
5882  -- ALGLIB --
5883  Copyright 18.05.2015 by Bochkanov Sergey
5884 *************************************************************************/
5885 void minnssetbc(const minnsstate &state, const real_1d_array &bndl, const real_1d_array &bndu);
5886 
5887 
5888 /*************************************************************************
5889 This function sets linear constraints.
5890 
5891 Linear constraints are inactive by default (after initial creation).
5892 They are preserved after algorithm restart with minnsrestartfrom().
5893 
5894 INPUT PARAMETERS:
5895  State - structure previously allocated with minnscreate() call.
5896  C - linear constraints, array[K,N+1].
5897  Each row of C represents one constraint, either equality
5898  or inequality (see below):
5899  * first N elements correspond to coefficients,
5900  * last element corresponds to the right part.
5901  All elements of C (including right part) must be finite.
5902  CT - type of constraints, array[K]:
5903  * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1]
5904  * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1]
5905  * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1]
5906  K - number of equality/inequality constraints, K>=0:
5907  * if given, only leading K elements of C/CT are used
5908  * if not given, automatically determined from sizes of C/CT
5909 
5910 NOTE: linear (non-bound) constraints are satisfied only approximately:
5911 
5912 * there always exists some minor violation (about current sampling radius
5913  in magnitude during optimization, about EpsX in the solution) due to use
5914  of penalty method to handle constraints.
5915 * numerical differentiation, if used, may lead to function evaluations
5916  outside of the feasible area, because algorithm does NOT change
5917  numerical differentiation formula according to linear constraints.
5918 
5919 If you want constraints to be satisfied exactly, try to reformulate your
5920 problem in such manner that all constraints will become boundary ones
5921 (this kind of constraints is always satisfied exactly, both in the final
5922 solution and in all intermediate points).
5923 
5924  -- ALGLIB --
5925  Copyright 18.05.2015 by Bochkanov Sergey
5926 *************************************************************************/
5927 void minnssetlc(const minnsstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k);
5928 void minnssetlc(const minnsstate &state, const real_2d_array &c, const integer_1d_array &ct);
5929 
5930 
5931 /*************************************************************************
5932 This function sets nonlinear constraints.
5933 
5934 In fact, this function sets NUMBER of nonlinear constraints. Constraints
5935 itself (constraint functions) are passed to minnsoptimize() method. This
5936 method requires user-defined vector function F[] and its Jacobian J[],
5937 where:
5938 * first component of F[] and first row of Jacobian J[] correspond to
5939  function being minimized
5940 * next NLEC components of F[] (and rows of J) correspond to nonlinear
5941  equality constraints G_i(x)=0
5942 * next NLIC components of F[] (and rows of J) correspond to nonlinear
5943  inequality constraints H_i(x)<=0
5944 
5945 NOTE: you may combine nonlinear constraints with linear/boundary ones. If
5946  your problem has mixed constraints, you may explicitly specify some
5947  of them as linear ones. It may help optimizer to handle them more
5948  efficiently.
5949 
5950 INPUT PARAMETERS:
5951  State - structure previously allocated with minnscreate() call.
5952  NLEC - number of Non-Linear Equality Constraints (NLEC), >=0
5953  NLIC - number of Non-Linear Inequality Constraints (NLIC), >=0
5954 
5955 NOTE 1: nonlinear constraints are satisfied only approximately! It is
5956  possible that algorithm will evaluate function outside of
5957  the feasible area!
5958 
5959 NOTE 2: algorithm scales variables according to scale specified by
5960  minnssetscale() function, so it can handle problems with badly
5961  scaled variables (as long as we KNOW their scales).
5962 
5963  However, there is no way to automatically scale nonlinear
5964  constraints Gi(x) and Hi(x). Inappropriate scaling of Gi/Hi may
5965  ruin convergence. Solving problem with constraint "1000*G0(x)=0"
5966  is NOT same as solving it with constraint "0.001*G0(x)=0".
5967 
5968  It means that YOU are the one who is responsible for correct
5969  scaling of nonlinear constraints Gi(x) and Hi(x). We recommend you
5970  to scale nonlinear constraints in such way that I-th component of
5971  dG/dX (or dH/dx) has approximately unit magnitude (for problems
5972  with unit scale) or has magnitude approximately equal to 1/S[i]
5973  (where S is a scale set by minnssetscale() function).
5974 
5975 NOTE 3: nonlinear constraints are always hard to handle, no matter what
5976  algorithm you try to use. Even basic box/linear constraints modify
5977  function curvature by adding valleys and ridges. However,
5978  nonlinear constraints add valleys which are very hard to follow
5979  due to their "curved" nature.
5980 
5981  It means that optimization with single nonlinear constraint may be
5982  significantly slower than optimization with multiple linear ones.
5983  It is normal situation, and we recommend you to carefully choose
5984  Rho parameter of minnssetalgoags(), because too large value may
5985  slow down convergence.
5986 
5987 
5988  -- ALGLIB --
5989  Copyright 18.05.2015 by Bochkanov Sergey
5990 *************************************************************************/
5991 void minnssetnlc(const minnsstate &state, const ae_int_t nlec, const ae_int_t nlic);
5992 
5993 
5994 /*************************************************************************
5995 This function sets stopping conditions for iterations of optimizer.
5996 
5997 INPUT PARAMETERS:
5998  State - structure which stores algorithm state
5999  EpsX - >=0
6000  The AGS solver finishes its work if on k+1-th iteration
6001  sampling radius decreases below EpsX.
6002  MaxIts - maximum number of iterations. If MaxIts=0, the number of
6003  iterations is unlimited.
6004 
6005 Passing EpsX=0 and MaxIts=0 (simultaneously) will lead to automatic
6006 stopping criterion selection. We do not recommend you to rely on default
6007 choice in production code.
6008 
6009  -- ALGLIB --
6010  Copyright 18.05.2015 by Bochkanov Sergey
6011 *************************************************************************/
6012 void minnssetcond(const minnsstate &state, const double epsx, const ae_int_t maxits);
6013 
6014 
6015 /*************************************************************************
6016 This function sets scaling coefficients for NLC optimizer.
6017 
6018 ALGLIB optimizers use scaling matrices to test stopping conditions (step
6019 size and gradient are scaled before comparison with tolerances). Scale of
6020 the I-th variable is a translation invariant measure of:
6021 a) "how large" the variable is
6022 b) how large the step should be to make significant changes in the function
6023 
6024 Scaling is also used by finite difference variant of the optimizer - step
6025 along I-th axis is equal to DiffStep*S[I].
6026 
6027 INPUT PARAMETERS:
6028  State - structure stores algorithm state
6029  S - array[N], non-zero scaling coefficients
6030  S[i] may be negative, sign doesn't matter.
6031 
6032  -- ALGLIB --
6033  Copyright 18.05.2015 by Bochkanov Sergey
6034 *************************************************************************/
6035 void minnssetscale(const minnsstate &state, const real_1d_array &s);
6036 
6037 
6038 /*************************************************************************
6039 This function tells MinNS unit to use AGS (adaptive gradient sampling)
6040 algorithm for nonsmooth constrained optimization. This algorithm is a
6041 slight modification of one described in "An Adaptive Gradient Sampling
6042 Algorithm for Nonsmooth Optimization" by Frank E. Curtis and Xiaocun Que.
6043 
6044 This optimizer has following benefits and drawbacks:
6045 + robustness; it can be used with nonsmooth and nonconvex functions.
6046 + relatively easy tuning; most of the metaparameters are easy to select.
6047 - it has convergence of steepest descent, slower than CG/LBFGS.
6048 - each iteration involves evaluation of ~2N gradient values and solution
6049  of 2Nx2N quadratic programming problem, which limits applicability of
6050  algorithm by small-scale problems (up to 50-100).
6051 
6052 IMPORTANT: this algorithm has convergence guarantees, i.e. it will
6053  steadily move towards some stationary point of the function.
6054 
6055  However, "stationary point" does not always mean "solution".
6056  Nonsmooth problems often have "flat spots", i.e. areas where
6057  function do not change at all. Such "flat spots" are stationary
6058  points by definition, and algorithm may be caught here.
6059 
6060  Nonsmooth CONVEX tasks are not prone to this problem. Say, if
6061  your function has form f()=MAX(f0,f1,...), and f_i are convex,
6062  then f() is convex too and you have guaranteed convergence to
6063  solution.
6064 
6065 INPUT PARAMETERS:
6066  State - structure which stores algorithm state
6067  Radius - initial sampling radius, >=0.
6068 
6069  Internally multiplied by vector of per-variable scales
6070  specified by minnssetscale()).
6071 
6072  You should select relatively large sampling radius, roughly
6073  proportional to scaled length of the first steps of the
6074  algorithm. Something close to 0.1 in magnitude should be
6075  good for most problems.
6076 
6077  AGS solver can automatically decrease radius, so too large
6078  radius is not a problem (assuming that you won't choose
6079  so large radius that algorithm will sample function in
6080  too far away points, where gradient value is irrelevant).
6081 
6082  Too small radius won't cause algorithm to fail, but it may
6083  slow down algorithm (it may have to perform too short
6084  steps).
6085  Penalty - penalty coefficient for nonlinear constraints:
6086  * for problem with nonlinear constraints should be some
6087  problem-specific positive value, large enough that
6088  penalty term changes shape of the function.
6089  Starting from some problem-specific value penalty
6090  coefficient becomes large enough to exactly enforce
6091  nonlinear constraints; larger values do not improve
6092  precision.
6093  Increasing it too much may slow down convergence, so you
6094  should choose it carefully.
6095  * can be zero for problems WITHOUT nonlinear constraints
6096  (i.e. for unconstrained ones or ones with just box or
6097  linear constraints)
6098  * if you specify zero value for problem with at least one
6099  nonlinear constraint, algorithm will terminate with
6100  error code -1.
6101 
6102 ALGORITHM OUTLINE
6103 
6104 The very basic outline of unconstrained AGS algorithm is given below:
6105 
6106 0. If sampling radius is below EpsX or we performed more than MaxIts
6107  iterations - STOP.
6108 1. sample O(N) gradient values at random locations around current point;
6109  informally speaking, this sample is an implicit piecewise linear model
6110  of the function, although algorithm formulation does not mention that
6111  explicitly
6112 2. solve quadratic programming problem in order to find descent direction
6113 3. if QP solver tells us that we are near solution, decrease sampling
6114  radius and move to (0)
6115 4. perform backtracking line search
6116 5. after moving to new point, goto (0)
6117 
6118 As for the constraints:
6119 * box constraints are handled exactly by modification of the function
6120  being minimized
6121 * linear/nonlinear constraints are handled by adding L1 penalty. Because
6122  our solver can handle nonsmoothness, we can use L1 penalty function,
6123  which is an exact one (i.e. exact solution is returned under such
6124  penalty).
6125 * penalty coefficient for linear constraints is chosen automatically;
6126  however, penalty coefficient for nonlinear constraints must be specified
6127  by user.
6128 
6129  -- ALGLIB --
6130  Copyright 18.05.2015 by Bochkanov Sergey
6131 *************************************************************************/
6132 void minnssetalgoags(const minnsstate &state, const double radius, const double penalty);
6133 
6134 
6135 /*************************************************************************
6136 This function turns on/off reporting.
6137 
6138 INPUT PARAMETERS:
6139  State - structure which stores algorithm state
6140  NeedXRep- whether iteration reports are needed or not
6141 
6142 If NeedXRep is True, algorithm will call rep() callback function if it is
6143 provided to minnsoptimize().
6144 
6145  -- ALGLIB --
6146  Copyright 28.11.2010 by Bochkanov Sergey
6147 *************************************************************************/
6148 void minnssetxrep(const minnsstate &state, const bool needxrep);
6149 
6150 
6151 /*************************************************************************
6152 This subroutine submits request for termination of running optimizer. It
6153 should be called from user-supplied callback when user decides that it is
6154 time to "smoothly" terminate optimization process. As result, optimizer
6155 stops at point which was "current accepted" when termination request was
6156 submitted and returns error code 8 (successful termination).
6157 
6158 INPUT PARAMETERS:
6159  State - optimizer structure
6160 
6161 NOTE: after request for termination optimizer may perform several
6162  additional calls to user-supplied callbacks. It does NOT guarantee
6163  to stop immediately - it just guarantees that these additional calls
6164  will be discarded later.
6165 
6166 NOTE: calling this function on optimizer which is NOT running will have no
6167  effect.
6168 
6169 NOTE: multiple calls to this function are possible. First call is counted,
6170  subsequent calls are silently ignored.
6171 
6172  -- ALGLIB --
6173  Copyright 18.05.2015 by Bochkanov Sergey
6174 *************************************************************************/
6175 void minnsrequesttermination(const minnsstate &state);
6176 
6177 
6178 /*************************************************************************
6179 This function provides reverse communication interface
6180 Reverse communication interface is not documented or recommended to use.
6181 See below for functions which provide better documented API
6182 *************************************************************************/
6183 bool minnsiteration(const minnsstate &state);
6184 
6185 
6186 /*************************************************************************
6187 This family of functions is used to launch iterations of nonlinear optimizer
6188 
6189 These functions accept following parameters:
6190  state - algorithm state
6191  fvec - callback which calculates function vector fi[]
6192  at given point x
6193  jac - callback which calculates function vector fi[]
6194  and Jacobian jac at given point x
6195  rep - optional callback which is called after each iteration
6196  can be NULL
6197  ptr - optional pointer which is passed to func/grad/hess/jac/rep
6198  can be NULL
6199 
6200 
6201 NOTES:
6202 
6203 1. This function has two different implementations: one which uses exact
6204  (analytical) user-supplied Jacobian, and one which uses only function
6205  vector and numerically differentiates function in order to obtain
6206  gradient.
6207 
6208  Depending on the specific function used to create optimizer object
6209  you should choose appropriate variant of minnsoptimize() - one which
6210  accepts function AND Jacobian or one which accepts ONLY function.
6211 
6212  Be careful to choose variant of minnsoptimize() which corresponds to
6213  your optimization scheme! Table below lists different combinations of
6214  callback (function/gradient) passed to minnsoptimize() and specific
6215  function used to create optimizer.
6216 
6217 
6218  | USER PASSED TO minnsoptimize()
6219  CREATED WITH | function only | function and gradient
6220  ------------------------------------------------------------
6221  minnscreatef() | works FAILS
6222  minnscreate() | FAILS works
6223 
6224  Here "FAILS" denotes inappropriate combinations of optimizer creation
6225  function and minnsoptimize() version. Attempts to use such
6226  combination will lead to exception. Either you did not pass gradient
6227  when it WAS needed or you passed gradient when it was NOT needed.
6228 
6229  -- ALGLIB --
6230  Copyright 18.05.2015 by Bochkanov Sergey
6231 
6232 *************************************************************************/
6233 void minnsoptimize(minnsstate &state,
6234  void (*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr),
6235  void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
6236  void *ptr = NULL);
6237 void minnsoptimize(minnsstate &state,
6238  void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr),
6239  void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
6240  void *ptr = NULL);
6241 
6242 
6243 /*************************************************************************
6244 MinNS results
6245 
6246 INPUT PARAMETERS:
6247  State - algorithm state
6248 
6249 OUTPUT PARAMETERS:
6250  X - array[0..N-1], solution
6251  Rep - optimization report. You should check Rep.TerminationType
6252  in order to distinguish successful termination from
6253  unsuccessful one:
6254  * -8 internal integrity control detected infinite or
6255  NAN values in function/gradient. Abnormal
6256  termination signalled.
6257  * -3 box constraints are inconsistent
6258  * -1 inconsistent parameters were passed:
6259  * penalty parameter for minnssetalgoags() is zero,
6260  but we have nonlinear constraints set by minnssetnlc()
6261  * 2 sampling radius decreased below epsx
6262  * 7 stopping conditions are too stringent,
6263  further improvement is impossible,
6264  X contains best point found so far.
6265  * 8 User requested termination via minnsrequesttermination()
6266 
6267  -- ALGLIB --
6268  Copyright 18.05.2015 by Bochkanov Sergey
6269 *************************************************************************/
6270 void minnsresults(const minnsstate &state, real_1d_array &x, minnsreport &rep);
6271 
6272 
6273 /*************************************************************************
6274 
6275 Buffered implementation of minnsresults() which uses pre-allocated buffer
6276 to store X[]. If buffer size is too small, it resizes buffer. It is
6277 intended to be used in the inner cycles of performance critical algorithms
6278 where array reallocation penalty is too large to be ignored.
6279 
6280  -- ALGLIB --
6281  Copyright 18.05.2015 by Bochkanov Sergey
6282 *************************************************************************/
6283 void minnsresultsbuf(const minnsstate &state, real_1d_array &x, minnsreport &rep);
6284 
6285 
6286 /*************************************************************************
6287 This subroutine restarts algorithm from new point.
6288 All optimization parameters (including constraints) are left unchanged.
6289 
6290 This function allows to solve multiple optimization problems (which
6291 must have same number of dimensions) without object reallocation penalty.
6292 
6293 INPUT PARAMETERS:
6294  State - structure previously allocated with minnscreate() call.
6295  X - new starting point.
6296 
6297  -- ALGLIB --
6298  Copyright 18.05.2015 by Bochkanov Sergey
6299 *************************************************************************/
6300 void minnsrestartfrom(const minnsstate &state, const real_1d_array &x);
6301 
6302 /*************************************************************************
6303 Obsolete function, use MinLBFGSSetPrecDefault() instead.
6304 
6305  -- ALGLIB --
6306  Copyright 13.10.2010 by Bochkanov Sergey
6307 *************************************************************************/
6308 void minlbfgssetdefaultpreconditioner(const minlbfgsstate &state);
6309 
6310 
6311 /*************************************************************************
6312 Obsolete function, use MinLBFGSSetCholeskyPreconditioner() instead.
6313 
6314  -- ALGLIB --
6315  Copyright 13.10.2010 by Bochkanov Sergey
6316 *************************************************************************/
6317 void minlbfgssetcholeskypreconditioner(const minlbfgsstate &state, const real_2d_array &p, const bool isupper);
6318 
6319 
6320 /*************************************************************************
6321 This is obsolete function which was used by previous version of the BLEIC
6322 optimizer. It does nothing in the current version of BLEIC.
6323 
6324  -- ALGLIB --
6325  Copyright 28.11.2010 by Bochkanov Sergey
6326 *************************************************************************/
6327 void minbleicsetbarrierwidth(const minbleicstate &state, const double mu);
6328 
6329 
6330 /*************************************************************************
6331 This is obsolete function which was used by previous version of the BLEIC
6332 optimizer. It does nothing in the current version of BLEIC.
6333 
6334  -- ALGLIB --
6335  Copyright 28.11.2010 by Bochkanov Sergey
6336 *************************************************************************/
6337 void minbleicsetbarrierdecay(const minbleicstate &state, const double mudecay);
6338 
6339 
6340 /*************************************************************************
6341 Obsolete optimization algorithm.
6342 Was replaced by MinBLEIC subpackage.
6343 
6344  -- ALGLIB --
6345  Copyright 25.03.2010 by Bochkanov Sergey
6346 *************************************************************************/
6347 void minasacreate(const ae_int_t n, const real_1d_array &x, const real_1d_array &bndl, const real_1d_array &bndu, minasastate &state);
6348 void minasacreate(const real_1d_array &x, const real_1d_array &bndl, const real_1d_array &bndu, minasastate &state);
6349 
6350 
6351 /*************************************************************************
6352 Obsolete optimization algorithm.
6353 Was replaced by MinBLEIC subpackage.
6354 
6355  -- ALGLIB --
6356  Copyright 02.04.2010 by Bochkanov Sergey
6357 *************************************************************************/
6358 void minasasetcond(const minasastate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits);
6359 
6360 
6361 /*************************************************************************
6362 Obsolete optimization algorithm.
6363 Was replaced by MinBLEIC subpackage.
6364 
6365  -- ALGLIB --
6366  Copyright 02.04.2010 by Bochkanov Sergey
6367 *************************************************************************/
6368 void minasasetxrep(const minasastate &state, const bool needxrep);
6369 
6370 
6371 /*************************************************************************
6372 Obsolete optimization algorithm.
6373 Was replaced by MinBLEIC subpackage.
6374 
6375  -- ALGLIB --
6376  Copyright 02.04.2010 by Bochkanov Sergey
6377 *************************************************************************/
6378 void minasasetalgorithm(const minasastate &state, const ae_int_t algotype);
6379 
6380 
6381 /*************************************************************************
6382 Obsolete optimization algorithm.
6383 Was replaced by MinBLEIC subpackage.
6384 
6385  -- ALGLIB --
6386  Copyright 02.04.2010 by Bochkanov Sergey
6387 *************************************************************************/
6388 void minasasetstpmax(const minasastate &state, const double stpmax);
6389 
6390 
6391 /*************************************************************************
6392 This function provides reverse communication interface
6393 Reverse communication interface is not documented or recommended to use.
6394 See below for functions which provide better documented API
6395 *************************************************************************/
6396 bool minasaiteration(const minasastate &state);
6397 
6398 
6399 /*************************************************************************
6400 This family of functions is used to launch iterations of nonlinear optimizer
6401 
6402 These functions accept following parameters:
6403  state - algorithm state
6404  grad - callback which calculates function (or merit function)
6405  value func and gradient grad at given point x
6406  rep - optional callback which is called after each iteration
6407  can be NULL
6408  ptr - optional pointer which is passed to func/grad/hess/jac/rep
6409  can be NULL
6410 
6411 
6412  -- ALGLIB --
6413  Copyright 20.03.2009 by Bochkanov Sergey
6414 
6415 *************************************************************************/
6416 void minasaoptimize(minasastate &state,
6417  void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
6418  void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
6419  void *ptr = NULL);
6420 
6421 
6422 /*************************************************************************
6423 Obsolete optimization algorithm.
6424 Was replaced by MinBLEIC subpackage.
6425 
6426  -- ALGLIB --
6427  Copyright 20.03.2009 by Bochkanov Sergey
6428 *************************************************************************/
6429 void minasaresults(const minasastate &state, real_1d_array &x, minasareport &rep);
6430 
6431 
6432 /*************************************************************************
6433 Obsolete optimization algorithm.
6434 Was replaced by MinBLEIC subpackage.
6435 
6436  -- ALGLIB --
6437  Copyright 20.03.2009 by Bochkanov Sergey
6438 *************************************************************************/
6439 void minasaresultsbuf(const minasastate &state, real_1d_array &x, minasareport &rep);
6440 
6441 
6442 /*************************************************************************
6443 Obsolete optimization algorithm.
6444 Was replaced by MinBLEIC subpackage.
6445 
6446  -- ALGLIB --
6447  Copyright 30.07.2010 by Bochkanov Sergey
6448 *************************************************************************/
6449 void minasarestartfrom(const minasastate &state, const real_1d_array &x, const real_1d_array &bndl, const real_1d_array &bndu);
6450 
6451 /*************************************************************************
6452  IMPROVED LEVENBERG-MARQUARDT METHOD FOR
6453  NON-LINEAR LEAST SQUARES OPTIMIZATION
6454 
6455 DESCRIPTION:
6456 This function is used to find minimum of function which is represented as
6457 sum of squares:
6458  F(x) = f[0]^2(x[0],...,x[n-1]) + ... + f[m-1]^2(x[0],...,x[n-1])
6459 using value of function vector f[] and Jacobian of f[].
6460 
6461 
6462 REQUIREMENTS:
6463 This algorithm will request following information during its operation:
6464 
6465 * function vector f[] at given point X
6466 * function vector f[] and Jacobian of f[] (simultaneously) at given point
6467 
6468 There are several overloaded versions of MinLMOptimize() function which
6469 correspond to different LM-like optimization algorithms provided by this
6470 unit. You should choose version which accepts fvec() and jac() callbacks.
6471 First one is used to calculate f[] at given point, second one calculates
6472 f[] and Jacobian df[i]/dx[j].
6473 
6474 You can try to initialize MinLMState structure with VJ function and then
6475 use incorrect version of MinLMOptimize() (for example, version which
6476 works with general form function and does not provide Jacobian), but it
6477 will lead to exception being thrown after first attempt to calculate
6478 Jacobian.
6479 
6480 
6481 USAGE:
6482 1. User initializes algorithm state with MinLMCreateVJ() call
6483 2. User tunes solver parameters with MinLMSetCond(), MinLMSetStpMax() and
6484  other functions
6485 3. User calls MinLMOptimize() function which takes algorithm state and
6486  callback functions.
6487 4. User calls MinLMResults() to get solution
6488 5. Optionally, user may call MinLMRestartFrom() to solve another problem
6489  with same N/M but another starting point and/or another function.
6490  MinLMRestartFrom() allows to reuse already initialized structure.
6491 
6492 
6493 INPUT PARAMETERS:
6494  N - dimension, N>1
6495  * if given, only leading N elements of X are used
6496  * if not given, automatically determined from size of X
6497  M - number of functions f[i]
6498  X - initial solution, array[0..N-1]
6499 
6500 OUTPUT PARAMETERS:
6501  State - structure which stores algorithm state
6502 
6503 NOTES:
6504 1. you may tune stopping conditions with MinLMSetCond() function
6505 2. if target function contains exp() or other fast growing functions, and
6506  optimization algorithm makes too large steps which leads to overflow,
6507  use MinLMSetStpMax() function to bound algorithm's steps.
6508 
6509  -- ALGLIB --
6510  Copyright 30.03.2009 by Bochkanov Sergey
6511 *************************************************************************/
6512 void minlmcreatevj(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlmstate &state);
6513 void minlmcreatevj(const ae_int_t m, const real_1d_array &x, minlmstate &state);
6514 
6515 
6516 /*************************************************************************
6517  IMPROVED LEVENBERG-MARQUARDT METHOD FOR
6518  NON-LINEAR LEAST SQUARES OPTIMIZATION
6519 
6520 DESCRIPTION:
6521 This function is used to find minimum of function which is represented as
6522 sum of squares:
6523  F(x) = f[0]^2(x[0],...,x[n-1]) + ... + f[m-1]^2(x[0],...,x[n-1])
6524 using value of function vector f[] only. Finite differences are used to
6525 calculate Jacobian.
6526 
6527 
6528 REQUIREMENTS:
6529 This algorithm will request following information during its operation:
6530 * function vector f[] at given point X
6531 
6532 There are several overloaded versions of MinLMOptimize() function which
6533 correspond to different LM-like optimization algorithms provided by this
6534 unit. You should choose version which accepts fvec() callback.
6535 
6536 You can try to initialize MinLMState structure with VJ function and then
6537 use incorrect version of MinLMOptimize() (for example, version which
6538 works with general form function and does not accept function vector), but
6539 it will lead to exception being thrown after first attempt to calculate
6540 Jacobian.
6541 
6542 
6543 USAGE:
6544 1. User initializes algorithm state with MinLMCreateV() call
6545 2. User tunes solver parameters with MinLMSetCond(), MinLMSetStpMax() and
6546  other functions
6547 3. User calls MinLMOptimize() function which takes algorithm state and
6548  callback functions.
6549 4. User calls MinLMResults() to get solution
6550 5. Optionally, user may call MinLMRestartFrom() to solve another problem
6551  with same N/M but another starting point and/or another function.
6552  MinLMRestartFrom() allows to reuse already initialized structure.
6553 
6554 
6555 INPUT PARAMETERS:
6556  N - dimension, N>1
6557  * if given, only leading N elements of X are used
6558  * if not given, automatically determined from size of X
6559  M - number of functions f[i]
6560  X - initial solution, array[0..N-1]
6561  DiffStep- differentiation step, >0
6562 
6563 OUTPUT PARAMETERS:
6564  State - structure which stores algorithm state
6565 
6566 See also MinLMIteration, MinLMResults.
6567 
6568 NOTES:
6569 1. you may tune stopping conditions with MinLMSetCond() function
6570 2. if target function contains exp() or other fast growing functions, and
6571  optimization algorithm makes too large steps which leads to overflow,
6572  use MinLMSetStpMax() function to bound algorithm's steps.
6573 
6574  -- ALGLIB --
6575  Copyright 30.03.2009 by Bochkanov Sergey
6576 *************************************************************************/
6577 void minlmcreatev(const ae_int_t n, const ae_int_t m, const real_1d_array &x, const double diffstep, minlmstate &state);
6578 void minlmcreatev(const ae_int_t m, const real_1d_array &x, const double diffstep, minlmstate &state);
6579 
6580 
6581 /*************************************************************************
6582  LEVENBERG-MARQUARDT-LIKE METHOD FOR NON-LINEAR OPTIMIZATION
6583 
6584 DESCRIPTION:
6585 This function is used to find minimum of general form (not "sum-of-
6586 -squares") function
6587  F = F(x[0], ..., x[n-1])
6588 using its gradient and Hessian. Levenberg-Marquardt modification with
6589 L-BFGS pre-optimization and internal pre-conditioned L-BFGS optimization
6590 after each Levenberg-Marquardt step is used.
6591 
6592 
6593 REQUIREMENTS:
6594 This algorithm will request following information during its operation:
6595 
6596 * function value F at given point X
6597 * F and gradient G (simultaneously) at given point X
6598 * F, G and Hessian H (simultaneously) at given point X
6599 
6600 There are several overloaded versions of MinLMOptimize() function which
6601 correspond to different LM-like optimization algorithms provided by this
6602 unit. You should choose version which accepts func(), grad() and hess()
6603 function pointers. First pointer is used to calculate F at given point,
6604 second one calculates F(x) and grad F(x), third one calculates F(x),
6605 grad F(x), hess F(x).
6606 
6607 You can try to initialize MinLMState structure with FGH-function and then
6608 use incorrect version of MinLMOptimize() (for example, version which does
6609 not provide Hessian matrix), but it will lead to exception being thrown
6610 after first attempt to calculate Hessian.
6611 
6612 
6613 USAGE:
6614 1. User initializes algorithm state with MinLMCreateFGH() call
6615 2. User tunes solver parameters with MinLMSetCond(), MinLMSetStpMax() and
6616  other functions
6617 3. User calls MinLMOptimize() function which takes algorithm state and
6618  pointers (delegates, etc.) to callback functions.
6619 4. User calls MinLMResults() to get solution
6620 5. Optionally, user may call MinLMRestartFrom() to solve another problem
6621  with same N but another starting point and/or another function.
6622  MinLMRestartFrom() allows to reuse already initialized structure.
6623 
6624 
6625 INPUT PARAMETERS:
6626  N - dimension, N>1
6627  * if given, only leading N elements of X are used
6628  * if not given, automatically determined from size of X
6629  X - initial solution, array[0..N-1]
6630 
6631 OUTPUT PARAMETERS:
6632  State - structure which stores algorithm state
6633 
6634 NOTES:
6635 1. you may tune stopping conditions with MinLMSetCond() function
6636 2. if target function contains exp() or other fast growing functions, and
6637  optimization algorithm makes too large steps which leads to overflow,
6638  use MinLMSetStpMax() function to bound algorithm's steps.
6639 
6640  -- ALGLIB --
6641  Copyright 30.03.2009 by Bochkanov Sergey
6642 *************************************************************************/
6643 void minlmcreatefgh(const ae_int_t n, const real_1d_array &x, minlmstate &state);
6644 void minlmcreatefgh(const real_1d_array &x, minlmstate &state);
6645 
6646 
6647 /*************************************************************************
6648 This function sets stopping conditions for Levenberg-Marquardt optimization
6649 algorithm.
6650 
6651 INPUT PARAMETERS:
6652  State - structure which stores algorithm state
6653  EpsX - >=0
6654  The subroutine finishes its work if on k+1-th iteration
6655  the condition |v|<=EpsX is fulfilled, where:
6656  * |.| means Euclidean norm
6657  * v - scaled step vector, v[i]=dx[i]/s[i]
6658  * dx - step vector, dx=X(k+1)-X(k)
6659  * s - scaling coefficients set by MinLMSetScale()
6660  Recommended values: 1E-9 ... 1E-12.
6661  MaxIts - maximum number of iterations. If MaxIts=0, the number of
6662  iterations is unlimited. Only Levenberg-Marquardt
6663  iterations are counted (L-BFGS/CG iterations are NOT
6664  counted because their cost is very low compared to that of
6665  LM).
6666 
6667 Passing EpsX=0 and MaxIts=0 (simultaneously) will lead to automatic
6668 stopping criterion selection (small EpsX).
6669 
6670 NOTE: it is not recommended to set large EpsX (say, 0.001). Because LM is
6671  a second-order method, it performs very precise steps anyway.
6672 
6673  -- ALGLIB --
6674  Copyright 02.04.2010 by Bochkanov Sergey
6675 *************************************************************************/
6676 void minlmsetcond(const minlmstate &state, const double epsx, const ae_int_t maxits);
6677 
6678 
6679 /*************************************************************************
6680 This function turns on/off reporting.
6681 
6682 INPUT PARAMETERS:
6683  State - structure which stores algorithm state
6684  NeedXRep- whether iteration reports are needed or not
6685 
6686 If NeedXRep is True, algorithm will call rep() callback function if it is
6687 provided to MinLMOptimize(). Both Levenberg-Marquardt and internal L-BFGS
6688 iterations are reported.
6689 
6690  -- ALGLIB --
6691  Copyright 02.04.2010 by Bochkanov Sergey
6692 *************************************************************************/
6693 void minlmsetxrep(const minlmstate &state, const bool needxrep);
6694 
6695 
6696 /*************************************************************************
6697 This function sets maximum step length
6698 
6699 INPUT PARAMETERS:
6700  State - structure which stores algorithm state
6701  StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't
6702  want to limit step length.
6703 
6704 Use this subroutine when you optimize target function which contains exp()
6705 or other fast growing functions, and optimization algorithm makes too
6706 large steps which leads to overflow. This function allows us to reject
6707 steps that are too large (and therefore expose us to the possible
6708 overflow) without actually calculating function value at the x+stp*d.
6709 
6710 NOTE: non-zero StpMax leads to moderate performance degradation because
6711 intermediate step of preconditioned L-BFGS optimization is incompatible
6712 with limits on step size.
6713 
6714  -- ALGLIB --
6715  Copyright 02.04.2010 by Bochkanov Sergey
6716 *************************************************************************/
6717 void minlmsetstpmax(const minlmstate &state, const double stpmax);
6718 
6719 
6720 /*************************************************************************
6721 This function sets scaling coefficients for LM optimizer.
6722 
6723 ALGLIB optimizers use scaling matrices to test stopping conditions (step
6724 size and gradient are scaled before comparison with tolerances). Scale of
6725 the I-th variable is a translation invariant measure of:
6726 a) "how large" the variable is
6727 b) how large the step should be to make significant changes in the function
6728 
6729 Generally, scale is NOT considered to be a form of preconditioner. But LM
6730 optimizer is unique in that it uses scaling matrix both in the stopping
6731 condition tests and as Marquardt damping factor.
6732 
6733 Proper scaling is very important for the algorithm performance. It is less
6734 important for the quality of results, but still has some influence (it is
6735 easier to converge when variables are properly scaled, so premature
6736 stopping is possible when very badly scaled variables are combined with
6737 relaxed stopping conditions).
6738 
6739 INPUT PARAMETERS:
6740  State - structure stores algorithm state
6741  S - array[N], non-zero scaling coefficients
6742  S[i] may be negative, sign doesn't matter.
6743 
6744  -- ALGLIB --
6745  Copyright 14.01.2011 by Bochkanov Sergey
6746 *************************************************************************/
6747 void minlmsetscale(const minlmstate &state, const real_1d_array &s);
6748 
6749 
6750 /*************************************************************************
6751 This function sets boundary constraints for LM optimizer
6752 
6753 Boundary constraints are inactive by default (after initial creation).
6754 They are preserved until explicitly turned off with another SetBC() call.
6755 
6756 INPUT PARAMETERS:
6757  State - structure stores algorithm state
6758  BndL - lower bounds, array[N].
6759  If some (all) variables are unbounded, you may specify
6760  very small number or -INF (latter is recommended because
6761  it will allow solver to use better algorithm).
6762  BndU - upper bounds, array[N].
6763  If some (all) variables are unbounded, you may specify
6764  very large number or +INF (latter is recommended because
6765  it will allow solver to use better algorithm).
6766 
6767 NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th
6768 variable will be "frozen" at X[i]=BndL[i]=BndU[i].
6769 
6770 NOTE 2: this solver has following useful properties:
6771 * bound constraints are always satisfied exactly
6772 * function is evaluated only INSIDE area specified by bound constraints
6773  or at its boundary
6774 
6775  -- ALGLIB --
6776  Copyright 14.01.2011 by Bochkanov Sergey
6777 *************************************************************************/
6778 void minlmsetbc(const minlmstate &state, const real_1d_array &bndl, const real_1d_array &bndu);
6779 
6780 
6781 /*************************************************************************
6782 This function sets general linear constraints for LM optimizer
6783 
6784 Linear constraints are inactive by default (after initial creation). They
6785 are preserved until explicitly turned off with another minlmsetlc() call.
6786 
6787 INPUT PARAMETERS:
6788  State - structure stores algorithm state
6789  C - linear constraints, array[K,N+1].
6790  Each row of C represents one constraint, either equality
6791  or inequality (see below):
6792  * first N elements correspond to coefficients,
6793  * last element corresponds to the right part.
6794  All elements of C (including right part) must be finite.
6795  CT - type of constraints, array[K]:
6796  * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1]
6797  * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1]
6798  * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1]
6799  K - number of equality/inequality constraints, K>=0:
6800  * if given, only leading K elements of C/CT are used
6801  * if not given, automatically determined from sizes of C/CT
6802 
6803 IMPORTANT: if you have linear constraints, it is strongly recommended to
6804  set scale of variables with minlmsetscale(). QP solver which is
6805  used to calculate linearly constrained steps heavily relies on
6806  good scaling of input problems.
6807 
6808 IMPORTANT: solvers created with minlmcreatefgh() do not support linear
6809  constraints.
6810 
6811 NOTE: linear (non-bound) constraints are satisfied only approximately -
6812  there always exists some violation due to numerical errors and
6813  algorithmic limitations.
6814 
6815 NOTE: general linear constraints add significant overhead to solution
6816  process. Although solver performs roughly same amount of iterations
6817  (when compared with similar box-only constrained problem), each
6818  iteration now involves solution of linearly constrained QP
6819  subproblem, which requires ~3-5 times more Cholesky decompositions.
6820  Thus, if you can reformulate your problem in such a way that it has
6821  only box constraints, it may be beneficial to do so.
6822 
6823  -- ALGLIB --
6824  Copyright 14.01.2011 by Bochkanov Sergey
6825 *************************************************************************/
6826 void minlmsetlc(const minlmstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k);
6827 void minlmsetlc(const minlmstate &state, const real_2d_array &c, const integer_1d_array &ct);
6828 
6829 
6830 /*************************************************************************
6831 This function is used to change acceleration settings
6832 
6833 You can choose between three acceleration strategies:
6834 * AccType=0, no acceleration.
6835 * AccType=1, secant updates are used to update quadratic model after each
6836  iteration. After fixed number of iterations (or after model breakdown)
6837  we recalculate quadratic model using analytic Jacobian or finite
6838  differences. Number of secant-based iterations depends on optimization
6839  settings: about 3 iterations - when we have analytic Jacobian, up to 2*N
6840  iterations - when we use finite differences to calculate Jacobian.
6841 
6842 AccType=1 is recommended when Jacobian calculation cost is prohibitively
6843 high (several Mx1 function vector calculations followed by several NxN
6844 Cholesky factorizations are faster than calculation of one M*N Jacobian).
6845 It should also be used when we have no Jacobian, because finite difference
6846 approximation takes too much time to compute.
6847 
6848 Table below list optimization protocols (XYZ protocol corresponds to
6849 MinLMCreateXYZ) and acceleration types they support (and use by default).
6850 
6851 ACCELERATION TYPES SUPPORTED BY OPTIMIZATION PROTOCOLS:
6852 
6853 protocol 0 1 comment
6854 V + +
6855 VJ + +
6856 FGH +
6857 
6858 DEFAULT VALUES:
6859 
6860 protocol 0 1 comment
6861 V x without acceleration it is so slooooooooow
6862 VJ x
6863 FGH x
6864 
6865 NOTE: this function should be called before optimization. Attempt to call
6866 it during algorithm iterations may result in unexpected behavior.
6867 
6868 NOTE: attempt to call this function with unsupported protocol/acceleration
6869 combination will result in exception being thrown.
6870 
6871  -- ALGLIB --
6872  Copyright 14.10.2010 by Bochkanov Sergey
6873 *************************************************************************/
6874 void minlmsetacctype(const minlmstate &state, const ae_int_t acctype);
6875 
6876 
6877 /*************************************************************************
6878 This function provides reverse communication interface
6879 Reverse communication interface is not documented or recommended to use.
6880 See below for functions which provide better documented API
6881 *************************************************************************/
6882 bool minlmiteration(const minlmstate &state);
6883 
6884 
6885 /*************************************************************************
6886 This family of functions is used to launch iterations of nonlinear optimizer
6887 
6888 These functions accept following parameters:
6889  state - algorithm state
6890  func - callback which calculates function (or merit function)
6891  value func at given point x
6892  grad - callback which calculates function (or merit function)
6893  value func and gradient grad at given point x
6894  hess - callback which calculates function (or merit function)
6895  value func, gradient grad and Hessian hess at given point x
6896  fvec - callback which calculates function vector fi[]
6897  at given point x
6898  jac - callback which calculates function vector fi[]
6899  and Jacobian jac at given point x
6900  rep - optional callback which is called after each iteration
6901  can be NULL
6902  ptr - optional pointer which is passed to func/grad/hess/jac/rep
6903  can be NULL
6904 
6905 NOTES:
6906 
6907 1. Depending on function used to create state structure, this algorithm
6908  may accept Jacobian and/or Hessian and/or gradient. According to the
6909  said above, there are several versions of this function, which accept
6910  different sets of callbacks.
6911 
6912  This flexibility opens way to subtle errors - you may create state with
6913  MinLMCreateFGH() (optimization using Hessian), but call function which
6914  does not accept Hessian. So when algorithm will request Hessian, there
6915  will be no callback to call. In this case exception will be thrown.
6916 
6917  Be careful to avoid such errors because there is no way to find them at
6918  compile time - you can see them at runtime only.
6919 
6920  -- ALGLIB --
6921  Copyright 10.03.2009 by Bochkanov Sergey
6922 
6923 *************************************************************************/
6925  void (*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr),
6926  void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
6927  void *ptr = NULL);
6929  void (*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr),
6930  void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr),
6931  void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
6932  void *ptr = NULL);
6934  void (*func)(const real_1d_array &x, double &func, void *ptr),
6935  void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
6936  void (*hess)(const real_1d_array &x, double &func, real_1d_array &grad, real_2d_array &hess, void *ptr),
6937  void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
6938  void *ptr = NULL);
6940  void (*func)(const real_1d_array &x, double &func, void *ptr),
6941  void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr),
6942  void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
6943  void *ptr = NULL);
6945  void (*func)(const real_1d_array &x, double &func, void *ptr),
6946  void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
6947  void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr),
6948  void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
6949  void *ptr = NULL);
6950 
6951 
6952 /*************************************************************************
6953 Levenberg-Marquardt algorithm results
6954 
6955 INPUT PARAMETERS:
6956  State - algorithm state
6957 
6958 OUTPUT PARAMETERS:
6959  X - array[0..N-1], solution
6960  Rep - optimization report; includes termination codes and
6961  additional information. Termination codes are listed below,
6962  see comments for this structure for more info.
6963  Termination code is stored in rep.terminationtype field:
6964  * -8 optimizer detected NAN/INF values either in the
6965  function itself, or in its Jacobian
6966  * -7 derivative correctness check failed;
6967  see rep.funcidx, rep.varidx for
6968  more information.
6969  * -3 constraints are inconsistent
6970  * 2 relative step is no more than EpsX.
6971  * 5 MaxIts steps was taken
6972  * 7 stopping conditions are too stringent,
6973  further improvement is impossible
6974  * 8 terminated by user who called minlmrequesttermination().
6975  X contains point which was "current accepted" when
6976  termination request was submitted.
6977 
6978  -- ALGLIB --
6979  Copyright 10.03.2009 by Bochkanov Sergey
6980 *************************************************************************/
6981 void minlmresults(const minlmstate &state, real_1d_array &x, minlmreport &rep);
6982 
6983 
6984 /*************************************************************************
6985 Levenberg-Marquardt algorithm results
6986 
6987 Buffered implementation of MinLMResults(), which uses pre-allocated buffer
6988 to store X[]. If buffer size is too small, it resizes buffer. It is
6989 intended to be used in the inner cycles of performance critical algorithms
6990 where array reallocation penalty is too large to be ignored.
6991 
6992  -- ALGLIB --
6993  Copyright 10.03.2009 by Bochkanov Sergey
6994 *************************************************************************/
6996 
6997 
6998 /*************************************************************************
6999 This subroutine restarts LM algorithm from new point. All optimization
7000 parameters are left unchanged.
7001 
7002 This function allows to solve multiple optimization problems (which
7003 must have same number of dimensions) without object reallocation penalty.
7004 
7005 INPUT PARAMETERS:
7006  State - structure used for reverse communication previously
7007  allocated with MinLMCreateXXX call.
7008  X - new starting point.
7009 
7010  -- ALGLIB --
7011  Copyright 30.07.2010 by Bochkanov Sergey
7012 *************************************************************************/
7013 void minlmrestartfrom(const minlmstate &state, const real_1d_array &x);
7014 
7015 
7016 /*************************************************************************
7017 This subroutine submits request for termination of running optimizer. It
7018 should be called from user-supplied callback when user decides that it is
7019 time to "smoothly" terminate optimization process. As result, optimizer
7020 stops at point which was "current accepted" when termination request was
7021 submitted and returns error code 8 (successful termination).
7022 
7023 INPUT PARAMETERS:
7024  State - optimizer structure
7025 
7026 NOTE: after request for termination optimizer may perform several
7027  additional calls to user-supplied callbacks. It does NOT guarantee
7028  to stop immediately - it just guarantees that these additional calls
7029  will be discarded later.
7030 
7031 NOTE: calling this function on optimizer which is NOT running will have no
7032  effect.
7033 
7034 NOTE: multiple calls to this function are possible. First call is counted,
7035  subsequent calls are silently ignored.
7036 
7037  -- ALGLIB --
7038  Copyright 08.10.2014 by Bochkanov Sergey
7039 *************************************************************************/
7041 
7042 
7043 /*************************************************************************
7044 This is obsolete function.
7045 
7046 Since ALGLIB 3.3 it is equivalent to MinLMCreateVJ().
7047 
7048  -- ALGLIB --
7049  Copyright 30.03.2009 by Bochkanov Sergey
7050 *************************************************************************/
7051 void minlmcreatevgj(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlmstate &state);
7052 void minlmcreatevgj(const ae_int_t m, const real_1d_array &x, minlmstate &state);
7053 
7054 
7055 /*************************************************************************
7056 This is obsolete function.
7057 
7058 Since ALGLIB 3.3 it is equivalent to MinLMCreateFJ().
7059 
7060  -- ALGLIB --
7061  Copyright 30.03.2009 by Bochkanov Sergey
7062 *************************************************************************/
7063 void minlmcreatefgj(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlmstate &state);
7064 void minlmcreatefgj(const ae_int_t m, const real_1d_array &x, minlmstate &state);
7065 
7066 
7067 /*************************************************************************
7068 This function is considered obsolete since ALGLIB 3.1.0 and is present for
7069 backward compatibility only. We recommend to use MinLMCreateVJ, which
7070 provides similar, but more consistent and feature-rich interface.
7071 
7072  -- ALGLIB --
7073  Copyright 30.03.2009 by Bochkanov Sergey
7074 *************************************************************************/
7075 void minlmcreatefj(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlmstate &state);
7076 void minlmcreatefj(const ae_int_t m, const real_1d_array &x, minlmstate &state);
7077 
7078 
7079 /*************************************************************************
7080 This subroutine turns on verification of the user-supplied analytic
7081 gradient:
7082 * user calls this subroutine before optimization begins
7083 * MinLMOptimize() is called
7084 * prior to actual optimization, for each function Fi and each component
7085  of parameters being optimized X[j] algorithm performs following steps:
7086  * two trial steps are made to X[j]-TestStep*S[j] and X[j]+TestStep*S[j],
7087  where X[j] is j-th parameter and S[j] is a scale of j-th parameter
7088  * if needed, steps are bounded with respect to constraints on X[]
7089  * Fi(X) is evaluated at these trial points
7090  * we perform one more evaluation in the middle point of the interval
7091  * we build cubic model using function values and derivatives at trial
7092  points and we compare its prediction with actual value in the middle
7093  point
7094  * in case difference between prediction and actual value is higher than
7095  some predetermined threshold, algorithm stops with completion code -7;
7096  Rep.VarIdx is set to index of the parameter with incorrect derivative,
7097  Rep.FuncIdx is set to index of the function.
7098 * after verification is over, algorithm proceeds to the actual optimization.
7099 
7100 NOTE 1: verification needs N (parameters count) Jacobian evaluations. It
7101  is very costly and you should use it only for low dimensional
7102  problems, when you want to be sure that you've correctly
7103  calculated analytic derivatives. You should not use it in the
7104  production code (unless you want to check derivatives provided
7105  by some third party).
7106 
7107 NOTE 2: you should carefully choose TestStep. Value which is too large
7108  (so large that function behaviour is significantly non-cubic) will
7109  lead to false alarms. You may use different step for different
7110  parameters by means of setting scale with MinLMSetScale().
7111 
7112 NOTE 3: this function may lead to false positives. In case it reports that
7113  I-th derivative was calculated incorrectly, you may decrease test
7114  step and try one more time - maybe your function changes too
7115  sharply and your step is too large for such rapidly changing
7116  function.
7117 
7118 INPUT PARAMETERS:
7119  State - structure used to store algorithm state
7120  TestStep - verification step:
7121  * TestStep=0 turns verification off
7122  * TestStep>0 activates verification
7123 
7124  -- ALGLIB --
7125  Copyright 15.06.2012 by Bochkanov Sergey
7126 *************************************************************************/
7127 void minlmsetgradientcheck(const minlmstate &state, const double teststep);
7128 }
7129 
7131 //
7132 // THIS SECTION CONTAINS COMPUTATIONAL CORE DECLARATIONS (FUNCTIONS)
7133 //
7135 namespace alglib_impl
7136 {
7139  /* Real */ ae_matrix* a,
7140  ae_bool isupper,
7141  double alpha,
7142  ae_state *_state);
7144  /* Real */ ae_matrix* a,
7145  ae_state *_state);
7147  /* Real */ ae_vector* z,
7148  ae_state *_state);
7150  /* Real */ ae_vector* d,
7151  double tau,
7152  ae_state *_state);
7155  /* Real */ ae_vector* b,
7156  ae_state *_state);
7158  /* Real */ ae_matrix* q,
7159  /* Real */ ae_vector* r,
7160  ae_int_t k,
7161  double theta,
7162  ae_state *_state);
7164  /* Real */ ae_vector* x,
7165  /* Boolean */ ae_vector* activeset,
7166  ae_state *_state);
7168  /* Real */ ae_vector* x,
7169  ae_state *_state);
7171  /* Real */ ae_vector* x,
7172  double* r,
7173  double* noise,
7174  ae_state *_state);
7176  /* Real */ ae_vector* x,
7177  /* Real */ ae_vector* g,
7178  ae_state *_state);
7180  /* Real */ ae_vector* x,
7181  ae_state *_state);
7183  /* Real */ ae_vector* x,
7184  /* Real */ ae_vector* y,
7185  ae_state *_state);
7187  /* Real */ ae_vector* x,
7188  ae_state *_state);
7190  /* Real */ ae_vector* x,
7191  ae_state *_state);
7193  /* Real */ ae_vector* x,
7194  ae_state *_state);
7196  /* Real */ ae_vector* x,
7197  ae_state *_state);
7198 void _convexquadraticmodel_init(void* _p, ae_state *_state);
7199 void _convexquadraticmodel_init_copy(void* _dst, void* _src, ae_state *_state);
7202 void trimprepare(double f, double* threshold, ae_state *_state);
7203 void trimfunction(double* f,
7204  /* Real */ ae_vector* g,
7205  ae_int_t n,
7206  double threshold,
7207  ae_state *_state);
7209  /* Real */ ae_vector* bl,
7210  /* Boolean */ ae_vector* havebl,
7211  /* Real */ ae_vector* bu,
7212  /* Boolean */ ae_vector* havebu,
7213  ae_int_t nmain,
7214  ae_int_t nslack,
7215  ae_state *_state);
7217  /* Real */ ae_vector* g,
7218  /* Real */ ae_vector* bl,
7219  /* Boolean */ ae_vector* havebl,
7220  /* Real */ ae_vector* bu,
7221  /* Boolean */ ae_vector* havebu,
7222  ae_int_t nmain,
7223  ae_int_t nslack,
7224  ae_state *_state);
7225 void calculatestepbound(/* Real */ ae_vector* x,
7226  /* Real */ ae_vector* d,
7227  double alpha,
7228  /* Real */ ae_vector* bndl,
7229  /* Boolean */ ae_vector* havebndl,
7230  /* Real */ ae_vector* bndu,
7231  /* Boolean */ ae_vector* havebndu,
7232  ae_int_t nmain,
7233  ae_int_t nslack,
7234  ae_int_t* variabletofreeze,
7235  double* valuetofreeze,
7236  double* maxsteplen,
7237  ae_state *_state);
7239  /* Real */ ae_vector* xprev,
7240  /* Real */ ae_vector* bndl,
7241  /* Boolean */ ae_vector* havebndl,
7242  /* Real */ ae_vector* bndu,
7243  /* Boolean */ ae_vector* havebndu,
7244  ae_int_t nmain,
7245  ae_int_t nslack,
7246  ae_int_t variabletofreeze,
7247  double valuetofreeze,
7248  double steptaken,
7249  double maxsteplen,
7250  ae_state *_state);
7251 void filterdirection(/* Real */ ae_vector* d,
7252  /* Real */ ae_vector* x,
7253  /* Real */ ae_vector* bndl,
7254  /* Boolean */ ae_vector* havebndl,
7255  /* Real */ ae_vector* bndu,
7256  /* Boolean */ ae_vector* havebndu,
7257  /* Real */ ae_vector* s,
7258  ae_int_t nmain,
7259  ae_int_t nslack,
7260  double droptol,
7261  ae_state *_state);
7263  /* Real */ ae_vector* xprev,
7264  /* Real */ ae_vector* bndl,
7265  /* Boolean */ ae_vector* havebndl,
7266  /* Real */ ae_vector* bndu,
7267  /* Boolean */ ae_vector* havebndu,
7268  ae_int_t nmain,
7269  ae_int_t nslack,
7270  ae_state *_state);
7272  /* Real */ ae_vector* bndl,
7273  /* Boolean */ ae_vector* havebndl,
7274  /* Real */ ae_vector* bndu,
7275  /* Boolean */ ae_vector* havebndu,
7276  ae_int_t nmain,
7277  ae_int_t nslack,
7278  /* Real */ ae_matrix* ce,
7279  ae_int_t k,
7280  double epsi,
7281  ae_int_t* qpits,
7282  ae_int_t* gpaits,
7283  ae_state *_state);
7285  double df0,
7286  double f1,
7287  double df1,
7288  double f,
7289  double df,
7290  double width,
7291  ae_state *_state);
7292 void estimateparabolicmodel(double absasum,
7293  double absasum2,
7294  double mx,
7295  double mb,
7296  double md,
7297  double d1,
7298  double d2,
7299  ae_int_t* d1est,
7300  ae_int_t* d2est,
7301  ae_state *_state);
7303  ae_int_t n,
7304  /* Real */ ae_vector* d,
7305  /* Real */ ae_vector* c,
7306  /* Real */ ae_matrix* w,
7307  ae_int_t k,
7308  precbuflbfgs* buf,
7309  ae_state *_state);
7311  /* Real */ ae_vector* c,
7312  /* Real */ ae_matrix* w,
7313  ae_int_t n,
7314  ae_int_t k,
7315  precbuflowrank* buf,
7316  ae_state *_state);
7318  precbuflowrank* buf,
7319  ae_state *_state);
7320 void _precbuflbfgs_init(void* _p, ae_state *_state);
7321 void _precbuflbfgs_init_copy(void* _dst, void* _src, ae_state *_state);
7322 void _precbuflbfgs_clear(void* _p);
7323 void _precbuflbfgs_destroy(void* _p);
7324 void _precbuflowrank_init(void* _p, ae_state *_state);
7325 void _precbuflowrank_init_copy(void* _dst, void* _src, ae_state *_state);
7326 void _precbuflowrank_clear(void* _p);
7328 void snnlsinit(ae_int_t nsmax,
7329  ae_int_t ndmax,
7330  ae_int_t nrmax,
7331  snnlssolver* s,
7332  ae_state *_state);
7334  /* Real */ ae_matrix* a,
7335  /* Real */ ae_vector* b,
7336  ae_int_t ns,
7337  ae_int_t nd,
7338  ae_int_t nr,
7339  ae_state *_state);
7342  /* Real */ ae_vector* x,
7343  ae_state *_state);
7344 void _snnlssolver_init(void* _p, ae_state *_state);
7345 void _snnlssolver_init_copy(void* _dst, void* _src, ae_state *_state);
7346 void _snnlssolver_clear(void* _p);
7347 void _snnlssolver_destroy(void* _p);
7348 void sasinit(ae_int_t n, sactiveset* s, ae_state *_state);
7350  /* Real */ ae_vector* s,
7351  ae_state *_state);
7353  /* Real */ ae_vector* d,
7354  ae_state *_state);
7355 void sassetbc(sactiveset* state,
7356  /* Real */ ae_vector* bndl,
7357  /* Real */ ae_vector* bndu,
7358  ae_state *_state);
7359 void sassetlc(sactiveset* state,
7360  /* Real */ ae_matrix* c,
7361  /* Integer */ ae_vector* ct,
7362  ae_int_t k,
7363  ae_state *_state);
7364 void sassetlcx(sactiveset* state,
7365  /* Real */ ae_matrix* cleic,
7366  ae_int_t nec,
7367  ae_int_t nic,
7368  ae_state *_state);
7370  /* Real */ ae_vector* x,
7371  ae_state *_state);
7373  /* Real */ ae_vector* d,
7374  double* stpmax,
7375  ae_int_t* cidx,
7376  double* vval,
7377  ae_state *_state);
7379  /* Real */ ae_vector* xn,
7380  ae_bool needact,
7381  ae_int_t cidx,
7382  double cval,
7383  ae_state *_state);
7385  ae_int_t cidx,
7386  double cval,
7387  ae_state *_state);
7389  /* Real */ ae_vector* g,
7390  /* Real */ ae_vector* d,
7391  ae_state *_state);
7393  /* Real */ ae_vector* g,
7394  /* Real */ ae_vector* d,
7395  ae_state *_state);
7397  /* Real */ ae_vector* d,
7398  ae_state *_state);
7400  /* Real */ ae_vector* d,
7401  ae_state *_state);
7403  /* Real */ ae_vector* x,
7404  double* penalty,
7405  ae_state *_state);
7407  /* Real */ ae_vector* x,
7408  ae_state *_state);
7410  /* Real */ ae_vector* d,
7411  ae_state *_state);
7414  /* Real */ ae_vector* gc,
7415  ae_state *_state);
7417  /* Real */ ae_vector* gc,
7418  ae_state *_state);
7419 void sasrebuildbasis(sactiveset* state, ae_state *_state);
7420 void _sactiveset_init(void* _p, ae_state *_state);
7421 void _sactiveset_init_copy(void* _dst, void* _src, ae_state *_state);
7422 void _sactiveset_clear(void* _p);
7423 void _sactiveset_destroy(void* _p);
7427  sparsematrix* sparseac,
7428  /* Real */ ae_matrix* denseac,
7429  ae_int_t akind,
7430  ae_bool isupper,
7431  /* Real */ ae_vector* bc,
7432  /* Real */ ae_vector* bndlc,
7433  /* Real */ ae_vector* bnduc,
7434  /* Real */ ae_vector* sc,
7435  /* Real */ ae_vector* xoriginc,
7436  ae_int_t nc,
7437  /* Real */ ae_matrix* cleicc,
7438  ae_int_t nec,
7439  ae_int_t nic,
7440  qqpsettings* settings,
7441  qqpbuffers* sstate,
7442  /* Real */ ae_vector* xs,
7443  ae_int_t* terminationtype,
7444  ae_state *_state);
7445 void _qqpsettings_init(void* _p, ae_state *_state);
7446 void _qqpsettings_init_copy(void* _dst, void* _src, ae_state *_state);
7447 void _qqpsettings_clear(void* _p);
7448 void _qqpsettings_destroy(void* _p);
7449 void _qqpbuffers_init(void* _p, ae_state *_state);
7450 void _qqpbuffers_init_copy(void* _dst, void* _src, ae_state *_state);
7451 void _qqpbuffers_clear(void* _p);
7452 void _qqpbuffers_destroy(void* _p);
7454  ae_int_t m,
7455  /* Real */ ae_vector* x,
7456  minlbfgsstate* state,
7457  ae_state *_state);
7459  ae_int_t m,
7460  /* Real */ ae_vector* x,
7461  double diffstep,
7462  minlbfgsstate* state,
7463  ae_state *_state);
7465  double epsg,
7466  double epsf,
7467  double epsx,
7468  ae_int_t maxits,
7469  ae_state *_state);
7471  ae_bool needxrep,
7472  ae_state *_state);
7474  double stpmax,
7475  ae_state *_state);
7477  /* Real */ ae_vector* s,
7478  ae_state *_state);
7480  ae_int_t m,
7481  /* Real */ ae_vector* x,
7482  ae_int_t flags,
7483  double diffstep,
7484  minlbfgsstate* state,
7485  ae_state *_state);
7488  /* Real */ ae_matrix* p,
7489  ae_bool isupper,
7490  ae_state *_state);
7492  /* Real */ ae_vector* d,
7493  ae_state *_state);
7496  /* Real */ ae_vector* d,
7497  /* Real */ ae_vector* c,
7498  /* Real */ ae_matrix* w,
7499  ae_int_t cnt,
7500  ae_state *_state);
7502  /* Real */ ae_vector* d,
7503  /* Real */ ae_vector* c,
7504  /* Real */ ae_matrix* w,
7505  ae_int_t cnt,
7506  ae_state *_state);
7509  /* Real */ ae_vector* x,
7510  minlbfgsreport* rep,
7511  ae_state *_state);
7513  /* Real */ ae_vector* x,
7514  minlbfgsreport* rep,
7515  ae_state *_state);
7517  /* Real */ ae_vector* x,
7518  ae_state *_state);
7521  double teststep,
7522  ae_state *_state);
7523 void _minlbfgsstate_init(void* _p, ae_state *_state);
7524 void _minlbfgsstate_init_copy(void* _dst, void* _src, ae_state *_state);
7525 void _minlbfgsstate_clear(void* _p);
7526 void _minlbfgsstate_destroy(void* _p);
7527 void _minlbfgsreport_init(void* _p, ae_state *_state);
7528 void _minlbfgsreport_init_copy(void* _dst, void* _src, ae_state *_state);
7529 void _minlbfgsreport_clear(void* _p);
7532  qpdenseaulsettings* s,
7533  ae_state *_state);
7535  sparsematrix* sparsea,
7536  ae_int_t akind,
7537  ae_bool sparseaupper,
7538  /* Real */ ae_vector* b,
7539  /* Real */ ae_vector* bndl,
7540  /* Real */ ae_vector* bndu,
7541  /* Real */ ae_vector* s,
7542  /* Real */ ae_vector* xorigin,
7543  ae_int_t nn,
7544  /* Real */ ae_matrix* cleic,
7545  ae_int_t dnec,
7546  ae_int_t dnic,
7547  sparsematrix* scleic,
7548  ae_int_t snec,
7549  ae_int_t snic,
7550  ae_bool renormlc,
7551  qpdenseaulsettings* settings,
7552  qpdenseaulbuffers* state,
7553  /* Real */ ae_vector* xs,
7554  ae_int_t* terminationtype,
7555  ae_state *_state);
7556 void _qpdenseaulsettings_init(void* _p, ae_state *_state);
7557 void _qpdenseaulsettings_init_copy(void* _dst, void* _src, ae_state *_state);
7560 void _qpdenseaulbuffers_init(void* _p, ae_state *_state);
7561 void _qpdenseaulbuffers_init_copy(void* _dst, void* _src, ae_state *_state);
7565  qpcholeskysettings* s,
7566  ae_state *_state);
7568  qpcholeskysettings* dst,
7569  ae_state *_state);
7571  double anorm,
7572  /* Real */ ae_vector* b,
7573  /* Real */ ae_vector* bndl,
7574  /* Real */ ae_vector* bndu,
7575  /* Real */ ae_vector* s,
7576  /* Real */ ae_vector* xorigin,
7577  ae_int_t n,
7578  /* Real */ ae_matrix* cleic,
7579  ae_int_t nec,
7580  ae_int_t nic,
7581  qpcholeskybuffers* sstate,
7582  /* Real */ ae_vector* xsc,
7583  ae_int_t* terminationtype,
7584  ae_state *_state);
7585 void _qpcholeskysettings_init(void* _p, ae_state *_state);
7586 void _qpcholeskysettings_init_copy(void* _dst, void* _src, ae_state *_state);
7589 void _qpcholeskybuffers_init(void* _p, ae_state *_state);
7590 void _qpcholeskybuffers_init_copy(void* _dst, void* _src, ae_state *_state);
7594  /* Real */ ae_vector* x,
7595  mincgstate* state,
7596  ae_state *_state);
7598  /* Real */ ae_vector* x,
7599  double diffstep,
7600  mincgstate* state,
7601  ae_state *_state);
7603  double epsg,
7604  double epsf,
7605  double epsx,
7606  ae_int_t maxits,
7607  ae_state *_state);
7609  /* Real */ ae_vector* s,
7610  ae_state *_state);
7611 void mincgsetxrep(mincgstate* state, ae_bool needxrep, ae_state *_state);
7612 void mincgsetdrep(mincgstate* state, ae_bool needdrep, ae_state *_state);
7613 void mincgsetcgtype(mincgstate* state, ae_int_t cgtype, ae_state *_state);
7614 void mincgsetstpmax(mincgstate* state, double stpmax, ae_state *_state);
7615 void mincgsuggeststep(mincgstate* state, double stp, ae_state *_state);
7616 double mincglastgoodstep(mincgstate* state, ae_state *_state);
7619  /* Real */ ae_vector* d,
7620  ae_state *_state);
7621 void mincgsetprecscale(mincgstate* state, ae_state *_state);
7624  /* Real */ ae_vector* x,
7625  mincgreport* rep,
7626  ae_state *_state);
7628  /* Real */ ae_vector* x,
7629  mincgreport* rep,
7630  ae_state *_state);
7632  /* Real */ ae_vector* x,
7633  ae_state *_state);
7636  /* Real */ ae_vector* d,
7637  ae_state *_state);
7639  /* Real */ ae_vector* d1,
7640  /* Real */ ae_vector* c,
7641  /* Real */ ae_matrix* v,
7642  ae_int_t vcnt,
7643  ae_state *_state);
7645  /* Real */ ae_vector* d2,
7646  ae_state *_state);
7648  double teststep,
7649  ae_state *_state);
7650 void _mincgstate_init(void* _p, ae_state *_state);
7651 void _mincgstate_init_copy(void* _dst, void* _src, ae_state *_state);
7652 void _mincgstate_clear(void* _p);
7653 void _mincgstate_destroy(void* _p);
7654 void _mincgreport_init(void* _p, ae_state *_state);
7655 void _mincgreport_init_copy(void* _dst, void* _src, ae_state *_state);
7656 void _mincgreport_clear(void* _p);
7657 void _mincgreport_destroy(void* _p);
7659  /* Real */ ae_vector* x,
7660  minbleicstate* state,
7661  ae_state *_state);
7663  /* Real */ ae_vector* x,
7664  double diffstep,
7665  minbleicstate* state,
7666  ae_state *_state);
7668  /* Real */ ae_vector* bndl,
7669  /* Real */ ae_vector* bndu,
7670  ae_state *_state);
7672  /* Real */ ae_matrix* c,
7673  /* Integer */ ae_vector* ct,
7674  ae_int_t k,
7675  ae_state *_state);
7677  double epsg,
7678  double epsf,
7679  double epsx,
7680  ae_int_t maxits,
7681  ae_state *_state);
7683  /* Real */ ae_vector* s,
7684  ae_state *_state);
7687  /* Real */ ae_vector* d,
7688  ae_state *_state);
7691  ae_bool needxrep,
7692  ae_state *_state);
7694  ae_bool needdrep,
7695  ae_state *_state);
7697  double stpmax,
7698  ae_state *_state);
7701  /* Real */ ae_vector* x,
7702  minbleicreport* rep,
7703  ae_state *_state);
7705  /* Real */ ae_vector* x,
7706  minbleicreport* rep,
7707  ae_state *_state);
7709  /* Real */ ae_vector* x,
7710  ae_state *_state);
7714  double teststep,
7715  ae_state *_state);
7716 void _minbleicstate_init(void* _p, ae_state *_state);
7717 void _minbleicstate_init_copy(void* _dst, void* _src, ae_state *_state);
7718 void _minbleicstate_clear(void* _p);
7719 void _minbleicstate_destroy(void* _p);
7720 void _minbleicreport_init(void* _p, ae_state *_state);
7721 void _minbleicreport_init_copy(void* _dst, void* _src, ae_state *_state);
7722 void _minbleicreport_clear(void* _p);
7725  qpbleicsettings* s,
7726  ae_state *_state);
7728  qpbleicsettings* dst,
7729  ae_state *_state);
7731  sparsematrix* sparsea,
7732  ae_int_t akind,
7733  ae_bool sparseaupper,
7734  double absasum,
7735  double absasum2,
7736  /* Real */ ae_vector* b,
7737  /* Real */ ae_vector* bndl,
7738  /* Real */ ae_vector* bndu,
7739  /* Real */ ae_vector* s,
7740  /* Real */ ae_vector* xorigin,
7741  ae_int_t n,
7742  /* Real */ ae_matrix* cleic,
7743  ae_int_t nec,
7744  ae_int_t nic,
7745  qpbleicsettings* settings,
7746  qpbleicbuffers* sstate,
7747  ae_bool* firstcall,
7748  /* Real */ ae_vector* xs,
7749  ae_int_t* terminationtype,
7750  ae_state *_state);
7751 void _qpbleicsettings_init(void* _p, ae_state *_state);
7752 void _qpbleicsettings_init_copy(void* _dst, void* _src, ae_state *_state);
7753 void _qpbleicsettings_clear(void* _p);
7755 void _qpbleicbuffers_init(void* _p, ae_state *_state);
7756 void _qpbleicbuffers_init_copy(void* _dst, void* _src, ae_state *_state);
7757 void _qpbleicbuffers_clear(void* _p);
7759 void minqpcreate(ae_int_t n, minqpstate* state, ae_state *_state);
7761  /* Real */ ae_vector* b,
7762  ae_state *_state);
7764  /* Real */ ae_matrix* a,
7765  ae_bool isupper,
7766  ae_state *_state);
7768  sparsematrix* a,
7769  ae_bool isupper,
7770  ae_state *_state);
7772  /* Real */ ae_vector* x,
7773  ae_state *_state);
7775  /* Real */ ae_vector* xorigin,
7776  ae_state *_state);
7778  /* Real */ ae_vector* s,
7779  ae_state *_state);
7782  double epsg,
7783  double epsf,
7784  double epsx,
7785  ae_int_t maxits,
7786  ae_state *_state);
7788  double epsx,
7789  double rho,
7790  ae_int_t itscnt,
7791  ae_state *_state);
7793  double epsg,
7794  double epsf,
7795  double epsx,
7796  ae_int_t maxouterits,
7797  ae_bool usenewton,
7798  ae_state *_state);
7800  /* Real */ ae_vector* bndl,
7801  /* Real */ ae_vector* bndu,
7802  ae_state *_state);
7804  /* Real */ ae_matrix* c,
7805  /* Integer */ ae_vector* ct,
7806  ae_int_t k,
7807  ae_state *_state);
7809  sparsematrix* c,
7810  /* Integer */ ae_vector* ct,
7811  ae_int_t k,
7812  ae_state *_state);
7814  /* Real */ ae_matrix* densec,
7815  /* Integer */ ae_vector* densect,
7816  ae_int_t densek,
7817  sparsematrix* sparsec,
7818  /* Integer */ ae_vector* sparsect,
7819  ae_int_t sparsek,
7820  ae_state *_state);
7821 void minqpoptimize(minqpstate* state, ae_state *_state);
7823  /* Real */ ae_vector* x,
7824  minqpreport* rep,
7825  ae_state *_state);
7827  /* Real */ ae_vector* x,
7828  minqpreport* rep,
7829  ae_state *_state);
7831  /* Real */ ae_vector* b,
7832  ae_state *_state);
7834  /* Real */ ae_matrix* a,
7835  ae_bool isupper,
7836  double s,
7837  ae_state *_state);
7839  /* Real */ ae_vector* s,
7840  ae_state *_state);
7842  /* Real */ ae_vector* x,
7843  ae_state *_state);
7845  /* Real */ ae_vector* xorigin,
7846  ae_state *_state);
7847 void _minqpstate_init(void* _p, ae_state *_state);
7848 void _minqpstate_init_copy(void* _dst, void* _src, ae_state *_state);
7849 void _minqpstate_clear(void* _p);
7850 void _minqpstate_destroy(void* _p);
7851 void _minqpreport_init(void* _p, ae_state *_state);
7852 void _minqpreport_init_copy(void* _dst, void* _src, ae_state *_state);
7853 void _minqpreport_clear(void* _p);
7854 void _minqpreport_destroy(void* _p);
7856  /* Real */ ae_vector* x,
7857  minnlcstate* state,
7858  ae_state *_state);
7860  /* Real */ ae_vector* x,
7861  double diffstep,
7862  minnlcstate* state,
7863  ae_state *_state);
7865  /* Real */ ae_vector* bndl,
7866  /* Real */ ae_vector* bndu,
7867  ae_state *_state);
7869  /* Real */ ae_matrix* c,
7870  /* Integer */ ae_vector* ct,
7871  ae_int_t k,
7872  ae_state *_state);
7874  ae_int_t nlec,
7875  ae_int_t nlic,
7876  ae_state *_state);
7878  double epsg,
7879  double epsf,
7880  double epsx,
7881  ae_int_t maxits,
7882  ae_state *_state);
7884  /* Real */ ae_vector* s,
7885  ae_state *_state);
7888  ae_int_t updatefreq,
7889  ae_state *_state);
7891  ae_int_t updatefreq,
7892  ae_state *_state);
7894 void minnlcsetstpmax(minnlcstate* state, double stpmax, ae_state *_state);
7896  double rho,
7897  ae_int_t itscnt,
7898  ae_state *_state);
7899 void minnlcsetxrep(minnlcstate* state, ae_bool needxrep, ae_state *_state);
7902  /* Real */ ae_vector* x,
7903  minnlcreport* rep,
7904  ae_state *_state);
7906  /* Real */ ae_vector* x,
7907  minnlcreport* rep,
7908  ae_state *_state);
7910  /* Real */ ae_vector* x,
7911  ae_state *_state);
7913  double teststep,
7914  ae_state *_state);
7916  double* f,
7917  double* df,
7918  double* d2f,
7919  ae_state *_state);
7921  double stabilizingpoint,
7922  double* f,
7923  double* df,
7924  double* d2f,
7925  ae_state *_state);
7927  double* f,
7928  double* df,
7929  double* d2f,
7930  ae_state *_state);
7931 void _minnlcstate_init(void* _p, ae_state *_state);
7932 void _minnlcstate_init_copy(void* _dst, void* _src, ae_state *_state);
7933 void _minnlcstate_clear(void* _p);
7934 void _minnlcstate_destroy(void* _p);
7935 void _minnlcreport_init(void* _p, ae_state *_state);
7936 void _minnlcreport_init_copy(void* _dst, void* _src, ae_state *_state);
7937 void _minnlcreport_clear(void* _p);
7938 void _minnlcreport_destroy(void* _p);
7940  /* Real */ ae_vector* x,
7941  minbcstate* state,
7942  ae_state *_state);
7944  /* Real */ ae_vector* x,
7945  double diffstep,
7946  minbcstate* state,
7947  ae_state *_state);
7949  /* Real */ ae_vector* bndl,
7950  /* Real */ ae_vector* bndu,
7951  ae_state *_state);
7953  double epsg,
7954  double epsf,
7955  double epsx,
7956  ae_int_t maxits,
7957  ae_state *_state);
7959  /* Real */ ae_vector* s,
7960  ae_state *_state);
7963  /* Real */ ae_vector* d,
7964  ae_state *_state);
7965 void minbcsetprecscale(minbcstate* state, ae_state *_state);
7966 void minbcsetxrep(minbcstate* state, ae_bool needxrep, ae_state *_state);
7967 void minbcsetstpmax(minbcstate* state, double stpmax, ae_state *_state);
7970  /* Real */ ae_vector* x,
7971  minbcreport* rep,
7972  ae_state *_state);
7974  /* Real */ ae_vector* x,
7975  minbcreport* rep,
7976  ae_state *_state);
7978  /* Real */ ae_vector* x,
7979  ae_state *_state);
7982  double teststep,
7983  ae_state *_state);
7984 void _minbcstate_init(void* _p, ae_state *_state);
7985 void _minbcstate_init_copy(void* _dst, void* _src, ae_state *_state);
7986 void _minbcstate_clear(void* _p);
7987 void _minbcstate_destroy(void* _p);
7988 void _minbcreport_init(void* _p, ae_state *_state);
7989 void _minbcreport_init_copy(void* _dst, void* _src, ae_state *_state);
7990 void _minbcreport_clear(void* _p);
7991 void _minbcreport_destroy(void* _p);
7993  /* Real */ ae_vector* x,
7994  minnsstate* state,
7995  ae_state *_state);
7997  /* Real */ ae_vector* x,
7998  double diffstep,
7999  minnsstate* state,
8000  ae_state *_state);
8002  /* Real */ ae_vector* bndl,
8003  /* Real */ ae_vector* bndu,
8004  ae_state *_state);
8006  /* Real */ ae_matrix* c,
8007  /* Integer */ ae_vector* ct,
8008  ae_int_t k,
8009  ae_state *_state);
8011  ae_int_t nlec,
8012  ae_int_t nlic,
8013  ae_state *_state);
8015  double epsx,
8016  ae_int_t maxits,
8017  ae_state *_state);
8019  /* Real */ ae_vector* s,
8020  ae_state *_state);
8022  double radius,
8023  double penalty,
8024  ae_state *_state);
8025 void minnssetxrep(minnsstate* state, ae_bool needxrep, ae_state *_state);
8029  /* Real */ ae_vector* x,
8030  minnsreport* rep,
8031  ae_state *_state);
8033  /* Real */ ae_vector* x,
8034  minnsreport* rep,
8035  ae_state *_state);
8037  /* Real */ ae_vector* x,
8038  ae_state *_state);
8039 void _minnsqp_init(void* _p, ae_state *_state);
8040 void _minnsqp_init_copy(void* _dst, void* _src, ae_state *_state);
8041 void _minnsqp_clear(void* _p);
8042 void _minnsqp_destroy(void* _p);
8043 void _minnsstate_init(void* _p, ae_state *_state);
8044 void _minnsstate_init_copy(void* _dst, void* _src, ae_state *_state);
8045 void _minnsstate_clear(void* _p);
8046 void _minnsstate_destroy(void* _p);
8047 void _minnsreport_init(void* _p, ae_state *_state);
8048 void _minnsreport_init_copy(void* _dst, void* _src, ae_state *_state);
8049 void _minnsreport_clear(void* _p);
8050 void _minnsreport_destroy(void* _p);
8052  ae_state *_state);
8054  /* Real */ ae_matrix* p,
8055  ae_bool isupper,
8056  ae_state *_state);
8058  double mu,
8059  ae_state *_state);
8061  double mudecay,
8062  ae_state *_state);
8064  /* Real */ ae_vector* x,
8065  /* Real */ ae_vector* bndl,
8066  /* Real */ ae_vector* bndu,
8067  minasastate* state,
8068  ae_state *_state);
8070  double epsg,
8071  double epsf,
8072  double epsx,
8073  ae_int_t maxits,
8074  ae_state *_state);
8075 void minasasetxrep(minasastate* state, ae_bool needxrep, ae_state *_state);
8077  ae_int_t algotype,
8078  ae_state *_state);
8079 void minasasetstpmax(minasastate* state, double stpmax, ae_state *_state);
8082  /* Real */ ae_vector* x,
8083  minasareport* rep,
8084  ae_state *_state);
8086  /* Real */ ae_vector* x,
8087  minasareport* rep,
8088  ae_state *_state);
8090  /* Real */ ae_vector* x,
8091  /* Real */ ae_vector* bndl,
8092  /* Real */ ae_vector* bndu,
8093  ae_state *_state);
8094 void _minasastate_init(void* _p, ae_state *_state);
8095 void _minasastate_init_copy(void* _dst, void* _src, ae_state *_state);
8096 void _minasastate_clear(void* _p);
8097 void _minasastate_destroy(void* _p);
8098 void _minasareport_init(void* _p, ae_state *_state);
8099 void _minasareport_init_copy(void* _dst, void* _src, ae_state *_state);
8100 void _minasareport_clear(void* _p);
8101 void _minasareport_destroy(void* _p);
8103  ae_int_t m,
8104  /* Real */ ae_vector* x,
8105  minlmstate* state,
8106  ae_state *_state);
8108  ae_int_t m,
8109  /* Real */ ae_vector* x,
8110  double diffstep,
8111  minlmstate* state,
8112  ae_state *_state);
8114  /* Real */ ae_vector* x,
8115  minlmstate* state,
8116  ae_state *_state);
8118  double epsx,
8119  ae_int_t maxits,
8120  ae_state *_state);
8121 void minlmsetxrep(minlmstate* state, ae_bool needxrep, ae_state *_state);
8122 void minlmsetstpmax(minlmstate* state, double stpmax, ae_state *_state);
8124  /* Real */ ae_vector* s,
8125  ae_state *_state);
8127  /* Real */ ae_vector* bndl,
8128  /* Real */ ae_vector* bndu,
8129  ae_state *_state);
8131  /* Real */ ae_matrix* c,
8132  /* Integer */ ae_vector* ct,
8133  ae_int_t k,
8134  ae_state *_state);
8136  ae_int_t acctype,
8137  ae_state *_state);
8140  /* Real */ ae_vector* x,
8141  minlmreport* rep,
8142  ae_state *_state);
8144  /* Real */ ae_vector* x,
8145  minlmreport* rep,
8146  ae_state *_state);
8148  /* Real */ ae_vector* x,
8149  ae_state *_state);
8152  ae_int_t m,
8153  /* Real */ ae_vector* x,
8154  minlmstate* state,
8155  ae_state *_state);
8157  ae_int_t m,
8158  /* Real */ ae_vector* x,
8159  minlmstate* state,
8160  ae_state *_state);
8162  ae_int_t m,
8163  /* Real */ ae_vector* x,
8164  minlmstate* state,
8165  ae_state *_state);
8167  double teststep,
8168  ae_state *_state);
8169 void _minlmstepfinder_init(void* _p, ae_state *_state);
8170 void _minlmstepfinder_init_copy(void* _dst, void* _src, ae_state *_state);
8171 void _minlmstepfinder_clear(void* _p);
8173 void _minlmstate_init(void* _p, ae_state *_state);
8174 void _minlmstate_init_copy(void* _dst, void* _src, ae_state *_state);
8175 void _minlmstate_clear(void* _p);
8176 void _minlmstate_destroy(void* _p);
8177 void _minlmreport_init(void* _p, ae_state *_state);
8178 void _minlmreport_init_copy(void* _dst, void* _src, ae_state *_state);
8179 void _minlmreport_clear(void* _p);
8180 void _minlmreport_destroy(void* _p);
8181 
8182 }
8183 #endif
8184 
ae_int_t postprocessboundedstep(ae_vector *x, ae_vector *xprev, ae_vector *bndl, ae_vector *havebndl, ae_vector *bndu, ae_vector *havebndu, ae_int_t nmain, ae_int_t nslack, ae_int_t variabletofreeze, double valuetofreeze, double steptaken, double maxsteplen, ae_state *_state)
alglib_impl::minqpreport * p_struct
void qqpoptimize(convexquadraticmodel *cqmac, sparsematrix *sparseac, ae_matrix *denseac, ae_int_t akind, ae_bool isupper, ae_vector *bc, ae_vector *bndlc, ae_vector *bnduc, ae_vector *sc, ae_vector *xoriginc, ae_int_t nc, ae_matrix *cleicc, ae_int_t nec, ae_int_t nic, qqpsettings *settings, qqpbuffers *sstate, ae_vector *xs, ae_int_t *terminationtype, ae_state *_state)
alglib_impl::minbcreport * p_struct
void cqmsetq(convexquadraticmodel *s, ae_matrix *q, ae_vector *r, ae_int_t k, double theta, ae_state *_state)
virtual ~minqpstate()
void minqpsetalgodenseaul(const minqpstate &state, const double epsx, const double rho, const ae_int_t itscnt)
double cqmeval(convexquadraticmodel *s, ae_vector *x, ae_state *_state)
void sasimmediateactivation(sactiveset *state, ae_int_t cidx, double cval, ae_state *_state)
bool minbciteration(const minbcstate &state)
_minqpstate_owner & operator=(const _minqpstate_owner &rhs)
minnsstate(const minnsstate &rhs)
void _minlbfgsreport_init(void *_p, ae_state *_state)
void minlbfgsrequesttermination(const minlbfgsstate &state)
void minqpsetlineartermfast(minqpstate *state, ae_vector *b, ae_state *_state)
minlbfgsreport(const minlbfgsreport &rhs)
void minlmoptimize(minlmstate &state, void(*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr), void(*rep)(const real_1d_array &x, double func, void *ptr)=NULL, void *ptr=NULL)
void _minbcreport_destroy(void *_p)
void minbcoptimize(minbcstate &state, void(*func)(const real_1d_array &x, double &func, void *ptr), void(*rep)(const real_1d_array &x, double func, void *ptr)=NULL, void *ptr=NULL)
void cqmevalx(convexquadraticmodel *s, ae_vector *x, double *r, double *noise, ae_state *_state)
void mincgsetprecscale(const mincgstate &state)
ae_int_t sasmoveto(sactiveset *state, ae_vector *xn, ae_bool needact, ae_int_t cidx, double cval, ae_state *_state)
void _qpcholeskybuffers_clear(void *_p)
minnlcreport(const minnlcreport &rhs)
void cqminit(ae_int_t n, convexquadraticmodel *s, ae_state *_state)
void minasarestartfrom(minasastate *state, ae_vector *x, ae_vector *bndl, ae_vector *bndu, ae_state *_state)
void minbleicresultsbuf(const minbleicstate &state, real_1d_array &x, minbleicreport &rep)
void cqmsetactiveset(convexquadraticmodel *s, ae_vector *x, ae_vector *activeset, ae_state *_state)
void minlbfgssetstpmax(minlbfgsstate *state, double stpmax, ae_state *_state)
ae_bool sasstartoptimization(sactiveset *state, ae_vector *x, ae_state *_state)
void _qqpsettings_destroy(void *_p)
void _precbuflowrank_init_copy(void *_dst, void *_src, ae_state *_state)
void minnssetcond(const minnsstate &state, const double epsx, const ae_int_t maxits)
real_2d_array j
void sassetbc(sactiveset *state, ae_vector *bndl, ae_vector *bndu, ae_state *_state)
void minnssetcond(minnsstate *state, double epsx, ae_int_t maxits, ae_state *_state)
void minbccreatef(const ae_int_t n, const real_1d_array &x, const double diffstep, minbcstate &state)
void minbcresultsbuf(minbcstate *state, ae_vector *x, minbcreport *rep, ae_state *_state)
void _mincgreport_init(void *_p, ae_state *_state)
void qpcholeskyoptimize(convexquadraticmodel *a, double anorm, ae_vector *b, ae_vector *bndl, ae_vector *bndu, ae_vector *s, ae_vector *xorigin, ae_int_t n, ae_matrix *cleic, ae_int_t nec, ae_int_t nic, qpcholeskybuffers *sstate, ae_vector *xsc, ae_int_t *terminationtype, ae_state *_state)
void _qpdenseaulsettings_init_copy(void *_dst, void *_src, ae_state *_state)
void minbleicsetbc(const minbleicstate &state, const real_1d_array &bndl, const real_1d_array &bndu)
void sasconstraineddescentprec(sactiveset *state, ae_vector *g, ae_vector *d, ae_state *_state)
ae_bool cqmconstrainedoptimum(convexquadraticmodel *s, ae_vector *x, ae_state *_state)
void _minqpstate_init(void *_p, ae_state *_state)
void minnlccreatef(const ae_int_t n, const real_1d_array &x, const double diffstep, minnlcstate &state)
void minnscreate(const ae_int_t n, const real_1d_array &x, minnsstate &state)
alglib_impl::minnlcstate * p_struct
void minlbfgssetdefaultpreconditioner(const minlbfgsstate &state)
void minnlcsetprecinexact(minnlcstate *state, ae_state *_state)
void snnlsdropnnc(snnlssolver *s, ae_int_t idx, ae_state *_state)
void minbcsetbc(const minbcstate &state, const real_1d_array &bndl, const real_1d_array &bndu)
void minnlcsetprecexactlowrank(minnlcstate *state, ae_int_t updatefreq, ae_state *_state)
alglib_impl::minnlcreport * c_ptr()
void cqmgeta(convexquadraticmodel *s, ae_matrix *a, ae_state *_state)
void _qpbleicsettings_destroy(void *_p)
void minbleicsetcond(const minbleicstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits)
void mincgcreatef(ae_int_t n, ae_vector *x, double diffstep, mincgstate *state, ae_state *_state)
void minbleicrestartfrom(minbleicstate *state, ae_vector *x, ae_state *_state)
void minlbfgsrequesttermination(minlbfgsstate *state, ae_state *_state)
void minbleicsetlc(const minbleicstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k)
void projectgradientintobc(ae_vector *x, ae_vector *g, ae_vector *bl, ae_vector *havebl, ae_vector *bu, ae_vector *havebu, ae_int_t nmain, ae_int_t nslack, ae_state *_state)
void minnssetnlc(minnsstate *state, ae_int_t nlec, ae_int_t nlic, ae_state *_state)
void minlmcreatefj(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlmstate &state)
void minasasetcond(minasastate *state, double epsg, double epsf, double epsx, ae_int_t maxits, ae_state *_state)
void minqpsetlc(const minqpstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k)
void minnlcequalitypenaltyfunction(double alpha, double *f, double *df, double *d2f, ae_state *_state)
ae_bool minnsiteration(minnsstate *state, ae_state *_state)
void minbcsetscale(minbcstate *state, ae_vector *s, ae_state *_state)
void mincgsetgradientcheck(const mincgstate &state, const double teststep)
void _minnsqp_clear(void *_p)
void minqpsetalgoquickqp(minqpstate *state, double epsg, double epsf, double epsx, ae_int_t maxouterits, ae_bool usenewton, ae_state *_state)
void minlmcreatev(const ae_int_t n, const ae_int_t m, const real_1d_array &x, const double diffstep, minlmstate &state)
void mincgoptimize(mincgstate &state, void(*func)(const real_1d_array &x, double &func, void *ptr), void(*rep)(const real_1d_array &x, double func, void *ptr)=NULL, void *ptr=NULL)
void minlbfgssetprecdefault(minlbfgsstate *state, ae_state *_state)
minnsreport(const minnsreport &rhs)
void minbleicoptimize(minbleicstate &state, void(*func)(const real_1d_array &x, double &func, void *ptr), void(*rep)(const real_1d_array &x, double func, void *ptr)=NULL, void *ptr=NULL)
void snnlsinit(ae_int_t nsmax, ae_int_t ndmax, ae_int_t nrmax, snnlssolver *s, ae_state *_state)
void minbcrequesttermination(const minbcstate &state)
void minlbfgssetscale(const minlbfgsstate &state, const real_1d_array &s)
void minlmsetgradientcheck(const minlmstate &state, const double teststep)
void _mincgstate_destroy(void *_p)
void filterdirection(ae_vector *d, ae_vector *x, ae_vector *bndl, ae_vector *havebndl, ae_vector *bndu, ae_vector *havebndu, ae_vector *s, ae_int_t nmain, ae_int_t nslack, double droptol, ae_state *_state)
void minnsoptimize(minnsstate &state, void(*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr), void(*rep)(const real_1d_array &x, double func, void *ptr)=NULL, void *ptr=NULL)
void minlbfgssetstpmax(const minlbfgsstate &state, const double stpmax)
ae_bool minlmiteration(minlmstate *state, ae_state *_state)
void minbleicemergencytermination(minbleicstate *state, ae_state *_state)
void _convexquadraticmodel_init(void *_p, ae_state *_state)
void _mincgreport_destroy(void *_p)
void _minqpstate_init_copy(void *_dst, void *_src, ae_state *_state)
void minbleicsetdrep(minbleicstate *state, ae_bool needdrep, ae_state *_state)
real_2d_array j
void mincgresults(const mincgstate &state, real_1d_array &x, mincgreport &rep)
void minlbfgsrestartfrom(const minlbfgsstate &state, const real_1d_array &x)
void _minbleicstate_init_copy(void *_dst, void *_src, ae_state *_state)
void minlbfgssetprecscale(const minlbfgsstate &state)
void mincgsetstpmax(mincgstate *state, double stpmax, ae_state *_state)
void minlbfgssetprecrankklbfgsfast(minlbfgsstate *state, ae_vector *d, ae_vector *c, ae_matrix *w, ae_int_t cnt, ae_state *_state)
virtual ~minnsreport()
void mincgresultsbuf(mincgstate *state, ae_vector *x, mincgreport *rep, ae_state *_state)
void trimprepare(double f, double *threshold, ae_state *_state)
void minasaresults(minasastate *state, ae_vector *x, minasareport *rep, ae_state *_state)
void sasreactivateconstraintsprec(sactiveset *state, ae_vector *gc, ae_state *_state)
void _minbleicstate_init(void *_p, ae_state *_state)
alglib_impl::minbleicstate * p_struct
bool minlmiteration(const minlmstate &state)
alglib_impl::minbcstate * c_ptr()
alglib_impl::mincgstate * p_struct
void sasstopoptimization(sactiveset *state, ae_state *_state)
alglib_impl::minnlcreport * p_struct
void _mincgreport_init_copy(void *_dst, void *_src, ae_state *_state)
alglib_impl::minasastate * c_ptr()
ae_int_t & iterationscount
void minqpresults(const minqpstate &state, real_1d_array &x, minqpreport &rep)
void minnssetlc(const minnsstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k)
void _minnlcstate_clear(void *_p)
void _mincgreport_clear(void *_p)
void minlmsetbc(minlmstate *state, ae_vector *bndl, ae_vector *bndu, ae_state *_state)
void minnlccreate(const ae_int_t n, const real_1d_array &x, minnlcstate &state)
void minlmrequesttermination(const minlmstate &state)
void minasasetxrep(minasastate *state, ae_bool needxrep, ae_state *_state)
void minnssetalgoags(const minnsstate &state, const double radius, const double penalty)
void mincgrequesttermination(const mincgstate &state)
void minnlcresults(minnlcstate *state, ae_vector *x, minnlcreport *rep, ae_state *_state)
void minlmcreatefj(ae_int_t n, ae_int_t m, ae_vector *x, minlmstate *state, ae_state *_state)
void _qqpbuffers_clear(void *_p)
void applylowrankpreconditioner(ae_vector *s, precbuflowrank *buf, ae_state *_state)
void minlmrestartfrom(const minlmstate &state, const real_1d_array &x)
void _qpdenseaulbuffers_init_copy(void *_dst, void *_src, ae_state *_state)
ae_int_t & terminationtype
void _qpdenseaulsettings_init(void *_p, ae_state *_state)
void minbcsetprecdiag(const minbcstate &state, const real_1d_array &d)
void _minnlcstate_init_copy(void *_dst, void *_src, ae_state *_state)
ae_int_t & terminationtype
void minqpsetscale(const minqpstate &state, const real_1d_array &s)
_minnlcstate_owner & operator=(const _minnlcstate_owner &rhs)
void minnlcsetscale(const minnlcstate &state, const real_1d_array &s)
void minlbfgsresults(const minlbfgsstate &state, real_1d_array &x, minlbfgsreport &rep)
void minqpsetstartingpointfast(minqpstate *state, ae_vector *x, ae_state *_state)
void sasinit(ae_int_t n, sactiveset *s, ae_state *_state)
alglib_impl::minlbfgsstate * p_struct
void _minqpreport_clear(void *_p)
void minlmresultsbuf(minlmstate *state, ae_vector *x, minlmreport *rep, ae_state *_state)
void minbleicsetbarrierdecay(const minbleicstate &state, const double mudecay)
void _qpcholeskysettings_clear(void *_p)
void mincgsetgradientcheck(mincgstate *state, double teststep, ae_state *_state)
void _sactiveset_init_copy(void *_dst, void *_src, ae_state *_state)
void qpbleicoptimize(convexquadraticmodel *a, sparsematrix *sparsea, ae_int_t akind, ae_bool sparseaupper, double absasum, double absasum2, ae_vector *b, ae_vector *bndl, ae_vector *bndu, ae_vector *s, ae_vector *xorigin, ae_int_t n, ae_matrix *cleic, ae_int_t nec, ae_int_t nic, qpbleicsettings *settings, qpbleicbuffers *sstate, ae_bool *firstcall, ae_vector *xs, ae_int_t *terminationtype, ae_state *_state)
void minnssetbc(minnsstate *state, ae_vector *bndl, ae_vector *bndu, ae_state *_state)
void minqpsetbc(minqpstate *state, ae_vector *bndl, ae_vector *bndu, ae_state *_state)
virtual ~minnlcstate()
void _minnlcreport_destroy(void *_p)
void minbcresultsbuf(const minbcstate &state, real_1d_array &x, minbcreport &rep)
void _snnlssolver_init(void *_p, ae_state *_state)
void minnscreatef(const ae_int_t n, const real_1d_array &x, const double diffstep, minnsstate &state)
void _minnsreport_destroy(void *_p)
alglib_impl::mincgstate * c_ptr()
void minasarestartfrom(const minasastate &state, const real_1d_array &x, const real_1d_array &bndl, const real_1d_array &bndu)
minqpreport & operator=(const minqpreport &rhs)
void minbcsetstpmax(minbcstate *state, double stpmax, ae_state *_state)
void minlmsetscale(const minlmstate &state, const real_1d_array &s)
alglib_impl::minnsreport * c_ptr()
void _snnlssolver_clear(void *_p)
_minbcreport_owner & operator=(const _minbcreport_owner &rhs)
void minlmcreatefgh(ae_int_t n, ae_vector *x, minlmstate *state, ae_state *_state)
void minlbfgsoptimize(minlbfgsstate &state, void(*func)(const real_1d_array &x, double &func, void *ptr), void(*rep)(const real_1d_array &x, double func, void *ptr)=NULL, void *ptr=NULL)
ae_bool minbciteration(minbcstate *state, ae_state *_state)
void minqpsetlcsparse(const minqpstate &state, const sparsematrix &c, const integer_1d_array &ct, const ae_int_t k)
void _precbuflbfgs_destroy(void *_p)
void minlmsetxrep(minlmstate *state, ae_bool needxrep, ae_state *_state)
ae_int_t & iterationscount
void _qqpsettings_init_copy(void *_dst, void *_src, ae_state *_state)
void minqpsetstartingpoint(minqpstate *state, ae_vector *x, ae_state *_state)
alglib_impl::minlbfgsreport * c_ptr()
void _precbuflowrank_destroy(void *_p)
void _minasareport_destroy(void *_p)
void _qpcholeskysettings_init(void *_p, ae_state *_state)
void minqpsetquadraticterm(minqpstate *state, ae_matrix *a, ae_bool isupper, ae_state *_state)
ae_bool mincgiteration(mincgstate *state, ae_state *_state)
void mincgcreate(ae_int_t n, ae_vector *x, mincgstate *state, ae_state *_state)
double cqmdebugconstrainedevalt(convexquadraticmodel *s, ae_vector *x, ae_state *_state)
void estimateparabolicmodel(double absasum, double absasum2, double mx, double mb, double md, double d1, double d2, ae_int_t *d1est, ae_int_t *d2est, ae_state *_state)
void minbcsetbc(minbcstate *state, ae_vector *bndl, ae_vector *bndu, ae_state *_state)
void _qpbleicsettings_clear(void *_p)
void minnlcsetcond(const minnlcstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits)
void minbleiccreatef(const ae_int_t n, const real_1d_array &x, const double diffstep, minbleicstate &state)
minasareport & operator=(const minasareport &rhs)
void _qpbleicbuffers_destroy(void *_p)
real_1d_array x
void minbleicsetstpmax(const minbleicstate &state, const double stpmax)
void minbleicsetbarrierwidth(minbleicstate *state, double mu, ae_state *_state)
_minbleicreport_owner & operator=(const _minbleicreport_owner &rhs)
void minlbfgssetdefaultpreconditioner(minlbfgsstate *state, ae_state *_state)
void minbcsetgradientcheck(const minbcstate &state, const double teststep)
void minbleicsetgradientcheck(minbleicstate *state, double teststep, ae_state *_state)
void qpdenseaulloaddefaults(ae_int_t nmain, qpdenseaulsettings *s, ae_state *_state)
void minlmsetstpmax(const minlmstate &state, const double stpmax)
_minnsstate_owner & operator=(const _minnsstate_owner &rhs)
void minnlcsetprecexactrobust(const minnlcstate &state, const ae_int_t updatefreq)
void _minlmreport_clear(void *_p)
alglib_impl::minqpstate * c_ptr()
void qpcholeskyloaddefaults(ae_int_t nmain, qpcholeskysettings *s, ae_state *_state)
void _qqpsettings_clear(void *_p)
alglib_impl::minnsstate * c_ptr()
void minlmsetscale(minlmstate *state, ae_vector *s, ae_state *_state)
alglib_impl::minbcstate * p_struct
void minnsresults(const minnsstate &state, real_1d_array &x, minnsreport &rep)
void minnssetscale(const minnsstate &state, const real_1d_array &s)
void _minnsstate_init_copy(void *_dst, void *_src, ae_state *_state)
void _minlmreport_destroy(void *_p)
double mincglastgoodstep(mincgstate *state, ae_state *_state)
void minlbfgsresultsbuf(minlbfgsstate *state, ae_vector *x, minlbfgsreport *rep, ae_state *_state)
void _minbleicreport_init_copy(void *_dst, void *_src, ae_state *_state)
double sasscaledconstrainednorm(sactiveset *state, ae_vector *d, ae_state *_state)
alglib_impl::minnsreport * p_struct
void minqpsetscale(minqpstate *state, ae_vector *s, ae_state *_state)
void _qqpbuffers_init(void *_p, ae_state *_state)
mincgreport & operator=(const mincgreport &rhs)
void minqpsetquadratictermsparse(const minqpstate &state, const sparsematrix &a, const bool isupper)
void minbleicresults(const minbleicstate &state, real_1d_array &x, minbleicreport &rep)
void _minbcstate_destroy(void *_p)
void minasasetstpmax(const minasastate &state, const double stpmax)
void _minbcstate_init_copy(void *_dst, void *_src, ae_state *_state)
void minnlcresultsbuf(minnlcstate *state, ae_vector *x, minnlcreport *rep, ae_state *_state)
void minqprewritediagonal(minqpstate *state, ae_vector *s, ae_state *_state)
alglib_impl::minbcreport * c_ptr()
void _minlbfgsstate_destroy(void *_p)
virtual ~minnsstate()
ae_bool minnlciteration(minnlcstate *state, ae_state *_state)
void _minnsstate_destroy(void *_p)
void _minasastate_destroy(void *_p)
void minqpsetorigin(minqpstate *state, ae_vector *xorigin, ae_state *_state)
_minbleicstate_owner & operator=(const _minbleicstate_owner &rhs)
void _minlbfgsreport_init_copy(void *_dst, void *_src, ae_state *_state)
void minnlcsetlc(const minnlcstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k)
void _minbcstate_init(void *_p, ae_state *_state)
void minbleicrestartfrom(const minbleicstate &state, const real_1d_array &x)
void minnsresultsbuf(const minnsstate &state, real_1d_array &x, minnsreport &rep)
ae_int_t & terminationtype
void mincgsetprecscale(mincgstate *state, ae_state *_state)
virtual ~minasastate()
void minbleicsetxrep(const minbleicstate &state, const bool needxrep)
void minasasetalgorithm(minasastate *state, ae_int_t algotype, ae_state *_state)
void minbleicresults(minbleicstate *state, ae_vector *x, minbleicreport *rep, ae_state *_state)
void minlbfgssetcond(const minlbfgsstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits)
void minqpsetquadraticterm(const minqpstate &state, const real_2d_array &a, const bool isupper)
void minnssetxrep(minnsstate *state, ae_bool needxrep, ae_state *_state)
_minlbfgsstate_owner & operator=(const _minlbfgsstate_owner &rhs)
ae_int_t numberofchangedconstraints(ae_vector *x, ae_vector *xprev, ae_vector *bndl, ae_vector *havebndl, ae_vector *bndu, ae_vector *havebndu, ae_int_t nmain, ae_int_t nslack, ae_state *_state)
minnlcstate & operator=(const minnlcstate &rhs)
void minlbfgssetcholeskypreconditioner(minlbfgsstate *state, ae_matrix *p, ae_bool isupper, ae_state *_state)
void _convexquadraticmodel_destroy(void *_p)
void minbleicsetbarrierdecay(minbleicstate *state, double mudecay, ae_state *_state)
void sasexploredirection(sactiveset *state, ae_vector *d, double *stpmax, ae_int_t *cidx, double *vval, ae_state *_state)
void _minasastate_init_copy(void *_dst, void *_src, ae_state *_state)
void calculatestepbound(ae_vector *x, ae_vector *d, double alpha, ae_vector *bndl, ae_vector *havebndl, ae_vector *bndu, ae_vector *havebndu, ae_int_t nmain, ae_int_t nslack, ae_int_t *variabletofreeze, double *valuetofreeze, double *maxsteplen, ae_state *_state)
void minqpresultsbuf(minqpstate *state, ae_vector *x, minqpreport *rep, ae_state *_state)
minlmreport(const minlmreport &rhs)
minnlcstate(const minnlcstate &rhs)
void mincgsetcond(const mincgstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits)
void mincgsetxrep(const mincgstate &state, const bool needxrep)
void _qpcholeskybuffers_init_copy(void *_dst, void *_src, ae_state *_state)
void minbcsetgradientcheck(minbcstate *state, double teststep, ae_state *_state)
void _minlbfgsreport_destroy(void *_p)
void _qpcholeskybuffers_destroy(void *_p)
virtual ~mincgreport()
void minqpsetorigin(const minqpstate &state, const real_1d_array &xorigin)
void minlmrequesttermination(minlmstate *state, ae_state *_state)
void mincgsetxrep(mincgstate *state, ae_bool needxrep, ae_state *_state)
ae_int_t & inneriterationscount
void minlmsetacctype(minlmstate *state, ae_int_t acctype, ae_state *_state)
#define ae_bool
Definition: ap.h:193
void mincgrequesttermination(mincgstate *state, ae_state *_state)
void qpdenseauloptimize(convexquadraticmodel *a, sparsematrix *sparsea, ae_int_t akind, ae_bool sparseaupper, ae_vector *b, ae_vector *bndl, ae_vector *bndu, ae_vector *s, ae_vector *xorigin, ae_int_t nn, ae_matrix *cleic, ae_int_t dnec, ae_int_t dnic, sparsematrix *scleic, ae_int_t snec, ae_int_t snic, ae_bool renormlc, qpdenseaulsettings *settings, qpdenseaulbuffers *state, ae_vector *xs, ae_int_t *terminationtype, ae_state *_state)
minnlcreport & operator=(const minnlcreport &rhs)
void minlmrestartfrom(minlmstate *state, ae_vector *x, ae_state *_state)
void mincgsetprecdefault(const mincgstate &state)
void minlbfgssetscale(minlbfgsstate *state, ae_vector *s, ae_state *_state)
bool minbleiciteration(const minbleicstate &state)
void mincgcreate(const ae_int_t n, const real_1d_array &x, mincgstate &state)
void minqpsetlcmixed(minqpstate *state, ae_matrix *densec, ae_vector *densect, ae_int_t densek, sparsematrix *sparsec, ae_vector *sparsect, ae_int_t sparsek, ae_state *_state)
void minqpresultsbuf(const minqpstate &state, real_1d_array &x, minqpreport &rep)
void sassetscale(sactiveset *state, ae_vector *s, ae_state *_state)
void minbleicsetprecdefault(minbleicstate *state, ae_state *_state)
alglib_impl::minasareport * c_ptr()
void minbcsetscale(const minbcstate &state, const real_1d_array &s)
void mincgsetprecdefault(mincgstate *state, ae_state *_state)
void minnlcsetlc(minnlcstate *state, ae_matrix *c, ae_vector *ct, ae_int_t k, ae_state *_state)
minasareport(const minasareport &rhs)
void _minlmstepfinder_clear(void *_p)
real_1d_array x
void minbcrequesttermination(minbcstate *state, ae_state *_state)
void minbleicresultsbuf(minbleicstate *state, ae_vector *x, minbleicreport *rep, ae_state *_state)
void minbleicsetprecdiag(minbleicstate *state, ae_vector *d, ae_state *_state)
void _minlbfgsreport_clear(void *_p)
bool minasaiteration(const minasastate &state)
void _minbcreport_init(void *_p, ae_state *_state)
void minbleicrequesttermination(minbleicstate *state, ae_state *_state)
void minnssetalgoags(minnsstate *state, double radius, double penalty, ae_state *_state)
minbleicreport & operator=(const minbleicreport &rhs)
void minlbfgssetgradientcheck(minlbfgsstate *state, double teststep, ae_state *_state)
void minlmcreatevj(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlmstate &state)
real_1d_array g
void minnlcsetgradientcheck(minnlcstate *state, double teststep, ae_state *_state)
void _minnlcreport_init(void *_p, ae_state *_state)
virtual ~minqpreport()
void minasasetcond(const minasastate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits)
void cqmgradunconstrained(convexquadraticmodel *s, ae_vector *x, ae_vector *g, ae_state *_state)
void _minlbfgsstate_clear(void *_p)
void minbccreate(ae_int_t n, ae_vector *x, minbcstate *state, ae_state *_state)
_minasareport_owner & operator=(const _minasareport_owner &rhs)
void minlmresults(const minlmstate &state, real_1d_array &x, minlmreport &rep)
void minlmcreatevj(ae_int_t n, ae_int_t m, ae_vector *x, minlmstate *state, ae_state *_state)
void minnlcsetstpmax(minnlcstate *state, double stpmax, ae_state *_state)
void _snnlssolver_destroy(void *_p)
void minnlcrestartfrom(const minnlcstate &state, const real_1d_array &x)
void minnlccreatef(ae_int_t n, ae_vector *x, double diffstep, minnlcstate *state, ae_state *_state)
alglib_impl::minlbfgsstate * c_ptr()
void minlbfgssetcholeskypreconditioner(const minlbfgsstate &state, const real_2d_array &p, const bool isupper)
void minlbfgscreatef(const ae_int_t n, const ae_int_t m, const real_1d_array &x, const double diffstep, minlbfgsstate &state)
void cqmsetb(convexquadraticmodel *s, ae_vector *b, ae_state *_state)
virtual ~minlmreport()
void _convexquadraticmodel_clear(void *_p)
alglib_impl::minlmreport * p_struct
void minbcrestartfrom(const minbcstate &state, const real_1d_array &x)
void _minnsqp_init(void *_p, ae_state *_state)
real_1d_array fi
void sasrebuildbasis(sactiveset *state, ae_state *_state)
void sassetlcx(sactiveset *state, ae_matrix *cleic, ae_int_t nec, ae_int_t nic, ae_state *_state)
void minlbfgssetgradientcheck(const minlbfgsstate &state, const double teststep)
void minasaresultsbuf(const minasastate &state, real_1d_array &x, minasareport &rep)
real_1d_array fi
void trimfunction(double *f, ae_vector *g, ae_int_t n, double threshold, ae_state *_state)
void _minnlcstate_init(void *_p, ae_state *_state)
void minlmcreatevgj(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlmstate &state)
ae_int_t & iterationscount
minbcstate & operator=(const minbcstate &rhs)
struct alglib_impl::ae_vector ae_vector
void _minbcreport_clear(void *_p)
void _precbuflowrank_init(void *_p, ae_state *_state)
virtual ~mincgstate()
void _minqpstate_destroy(void *_p)
void _minlmstate_init(void *_p, ae_state *_state)
void minnsresultsbuf(minnsstate *state, ae_vector *x, minnsreport *rep, ae_state *_state)
void cqmscalevector(convexquadraticmodel *s, ae_vector *x, ae_state *_state)
void minbccreatef(ae_int_t n, ae_vector *x, double diffstep, minbcstate *state, ae_state *_state)
void qqploaddefaults(ae_int_t nmain, qqpsettings *s, ae_state *_state)
void minbleicsetbc(minbleicstate *state, ae_vector *bndl, ae_vector *bndu, ae_state *_state)
void _qpbleicsettings_init(void *_p, ae_state *_state)
void minbcsetprecdefault(minbcstate *state, ae_state *_state)
ae_int_t & terminationtype
minnsreport & operator=(const minnsreport &rhs)
void minlbfgsresultsbuf(const minlbfgsstate &state, real_1d_array &x, minlbfgsreport &rep)
void minlbfgssetpreccholesky(const minlbfgsstate &state, const real_2d_array &p, const bool isupper)
void minnlcsetnlc(minnlcstate *state, ae_int_t nlec, ae_int_t nlic, ae_state *_state)
double cqmxtadx2(convexquadraticmodel *s, ae_vector *x, ae_state *_state)
void _minlmstate_destroy(void *_p)
ae_bool enforceboundaryconstraints(ae_vector *x, ae_vector *bl, ae_vector *havebl, ae_vector *bu, ae_vector *havebu, ae_int_t nmain, ae_int_t nslack, ae_state *_state)
ae_int_t & outeriterationscount
void minnlcsetbc(minnlcstate *state, ae_vector *bndl, ae_vector *bndu, ae_state *_state)
minbcreport & operator=(const minbcreport &rhs)
void sasconstraineddirectionprec(sactiveset *state, ae_vector *d, ae_state *_state)
void minqpsetalgocholesky(minqpstate *state, ae_state *_state)
void minlbfgscreate(ae_int_t n, ae_int_t m, ae_vector *x, minlbfgsstate *state, ae_state *_state)
minbcstate(const minbcstate &rhs)
void minlbfgsresults(minlbfgsstate *state, ae_vector *x, minlbfgsreport *rep, ae_state *_state)
void minnssetxrep(const minnsstate &state, const bool needxrep)
minbcreport(const minbcreport &rhs)
ae_bool minasaiteration(minasastate *state, ae_state *_state)
ae_int_t & iterationscount
void _minnsstate_clear(void *_p)
void minnlcsetnlc(const minnlcstate &state, const ae_int_t nlec, const ae_int_t nlic)
void minnsrequesttermination(const minnsstate &state)
void minbleicrequesttermination(const minbleicstate &state)
void minqpsetalgodenseaul(minqpstate *state, double epsx, double rho, ae_int_t itscnt, ae_state *_state)
void _minlbfgsstate_init_copy(void *_dst, void *_src, ae_state *_state)
void qqpcopysettings(qqpsettings *src, qqpsettings *dst, ae_state *_state)
void minbleicsetprecdiag(const minbleicstate &state, const real_1d_array &d)
ae_int_t & terminationtype
void cqmdropa(convexquadraticmodel *s, ae_state *_state)
void minqpsetalgobleic(minqpstate *state, double epsg, double epsf, double epsx, ae_int_t maxits, ae_state *_state)
void _sactiveset_init(void *_p, ae_state *_state)
void _minasastate_clear(void *_p)
alglib_impl::minasastate * p_struct
void minasaoptimize(minasastate &state, void(*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr), void(*rep)(const real_1d_array &x, double func, void *ptr)=NULL, void *ptr=NULL)
void minasasetalgorithm(const minasastate &state, const ae_int_t algotype)
virtual ~minlmstate()
void minnlcinequalityshiftfunction(double alpha, double *f, double *df, double *d2f, ae_state *_state)
_minnsreport_owner & operator=(const _minnsreport_owner &rhs)
void _snnlssolver_init_copy(void *_dst, void *_src, ae_state *_state)
void minbcsetcond(minbcstate *state, double epsg, double epsf, double epsx, ae_int_t maxits, ae_state *_state)
void minnlcsetstpmax(const minnlcstate &state, const double stpmax)
alglib_impl::minlmstate * p_struct
void minasaresults(const minasastate &state, real_1d_array &x, minasareport &rep)
void minqpsetlc(minqpstate *state, ae_matrix *c, ae_vector *ct, ae_int_t k, ae_state *_state)
void minqpsetoriginfast(minqpstate *state, ae_vector *xorigin, ae_state *_state)
void minbcsetxrep(minbcstate *state, ae_bool needxrep, ae_state *_state)
void minbcsetprecdefault(const minbcstate &state)
bool mincgiteration(const mincgstate &state)
minlbfgsstate(const minlbfgsstate &rhs)
ae_int_t & outeriterationscount
void minqpresults(minqpstate *state, ae_vector *x, minqpreport *rep, ae_state *_state)
ae_bool minbleiciteration(minbleicstate *state, ae_state *_state)
void mincgsetprecvarpart(mincgstate *state, ae_vector *d2, ae_state *_state)
void minbcresults(const minbcstate &state, real_1d_array &x, minbcreport &rep)
minlmreport & operator=(const minlmreport &rhs)
void _qpdenseaulbuffers_clear(void *_p)
void _minasastate_init(void *_p, ae_state *_state)
void _minasareport_clear(void *_p)
alglib_impl::ae_int_t ae_int_t
Definition: ap.h:965
void minnlcinequalitypenaltyfunction(double alpha, double stabilizingpoint, double *f, double *df, double *d2f, ae_state *_state)
void minnlcsetprecnone(minnlcstate *state, ae_state *_state)
void minlmsetstpmax(minlmstate *state, double stpmax, ae_state *_state)
minqpstate(const minqpstate &rhs)
void minqpsetstartingpoint(const minqpstate &state, const real_1d_array &x)
void minnlcsetalgoaul(const minnlcstate &state, const double rho, const ae_int_t itscnt)
void minlmsetacctype(const minlmstate &state, const ae_int_t acctype)
void _qpcholeskysettings_init_copy(void *_dst, void *_src, ae_state *_state)
void mincgsuggeststep(mincgstate *state, double stp, ae_state *_state)
void _convexquadraticmodel_init_copy(void *_dst, void *_src, ae_state *_state)
void minlbfgssetxrep(const minlbfgsstate &state, const bool needxrep)
void _minlmstepfinder_destroy(void *_p)
alglib_impl::mincgreport * c_ptr()
void preparelowrankpreconditioner(ae_vector *d, ae_vector *c, ae_matrix *w, ae_int_t n, ae_int_t k, precbuflowrank *buf, ae_state *_state)
alglib_impl::minnlcstate * c_ptr()
void _minlmstate_init_copy(void *_dst, void *_src, ae_state *_state)
void minbleiccreate(ae_int_t n, ae_vector *x, minbleicstate *state, ae_state *_state)
void qpbleicloaddefaults(ae_int_t nmain, qpbleicsettings *s, ae_state *_state)
void _qpbleicbuffers_clear(void *_p)
void mincgsetcond(mincgstate *state, double epsg, double epsf, double epsx, ae_int_t maxits, ae_state *_state)
alglib_impl::mincgreport * p_struct
alglib_impl::minnsstate * p_struct
_minlbfgsreport_owner & operator=(const _minlbfgsreport_owner &rhs)
void _qpdenseaulbuffers_destroy(void *_p)
void sassetprecdiag(sactiveset *state, ae_vector *d, ae_state *_state)
void minbleicsetscale(minbleicstate *state, ae_vector *s, ae_state *_state)
void minasasetstpmax(minasastate *state, double stpmax, ae_state *_state)
void minnscreate(ae_int_t n, ae_vector *x, minnsstate *state, ae_state *_state)
void minqpsetquadratictermfast(minqpstate *state, ae_matrix *a, ae_bool isupper, double s, ae_state *_state)
void minbccreate(const ae_int_t n, const real_1d_array &x, minbcstate &state)
_minasastate_owner & operator=(const _minasastate_owner &rhs)
void minbleicsetprecscale(minbleicstate *state, ae_state *_state)
minlbfgsstate & operator=(const minlbfgsstate &rhs)
void _precbuflbfgs_clear(void *_p)
void _minnsqp_init_copy(void *_dst, void *_src, ae_state *_state)
void minqpsetquadratictermsparse(minqpstate *state, sparsematrix *a, ae_bool isupper, ae_state *_state)
void minbleicsetscale(const minbleicstate &state, const real_1d_array &s)
void _minnsreport_init(void *_p, ae_state *_state)
void mincgsetprecdiag(const mincgstate &state, const real_1d_array &d)
_minlmstate_owner & operator=(const _minlmstate_owner &rhs)
void minqpsetlinearterm(minqpstate *state, ae_vector *b, ae_state *_state)
void cqmseta(convexquadraticmodel *s, ae_matrix *a, ae_bool isupper, double alpha, ae_state *_state)
void minlmsetlc(const minlmstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k)
ae_bool findfeasiblepoint(ae_vector *x, ae_vector *bndl, ae_vector *havebndl, ae_vector *bndu, ae_vector *havebndu, ae_int_t nmain, ae_int_t nslack, ae_matrix *ce, ae_int_t k, double epsi, ae_int_t *qpits, ae_int_t *gpaits, ae_state *_state)
void qpcholeskycopysettings(qpcholeskysettings *src, qpcholeskysettings *dst, ae_state *_state)
void _minlmreport_init(void *_p, ae_state *_state)
void _mincgstate_init(void *_p, ae_state *_state)
void minnlcsetalgoaul(minnlcstate *state, double rho, ae_int_t itscnt, ae_state *_state)
void _qpdenseaulsettings_destroy(void *_p)
void minqpsetlcsparse(minqpstate *state, sparsematrix *c, ae_vector *ct, ae_int_t k, ae_state *_state)
void _minasareport_init(void *_p, ae_state *_state)
void _minbleicreport_destroy(void *_p)
void minlbfgssetprecdiag(minlbfgsstate *state, ae_vector *d, ae_state *_state)
void minnssetbc(const minnsstate &state, const real_1d_array &bndl, const real_1d_array &bndu)
alglib_impl::minbleicreport * p_struct
void minqpsetalgocholesky(const minqpstate &state)
minbleicstate & operator=(const minbleicstate &rhs)
void cqmrewritedensediagonal(convexquadraticmodel *s, ae_vector *z, ae_state *_state)
void mincgsetscale(mincgstate *state, ae_vector *s, ae_state *_state)
void _minasareport_init_copy(void *_dst, void *_src, ae_state *_state)
void sassetlc(sactiveset *state, ae_matrix *c, ae_vector *ct, ae_int_t k, ae_state *_state)
ae_bool minlbfgsiteration(minlbfgsstate *state, ae_state *_state)
void minnlcsetbc(const minnlcstate &state, const real_1d_array &bndl, const real_1d_array &bndu)
virtual ~minbcreport()
void _minbleicreport_clear(void *_p)
_mincgstate_owner & operator=(const _mincgstate_owner &rhs)
void minbcsetprecscale(minbcstate *state, ae_state *_state)
void minbcrestartfrom(minbcstate *state, ae_vector *x, ae_state *_state)
void minlmsetbc(const minlmstate &state, const real_1d_array &bndl, const real_1d_array &bndu)
void _mincgstate_clear(void *_p)
minasastate & operator=(const minasastate &rhs)
void _minbcstate_clear(void *_p)
void minlbfgscreate(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlbfgsstate &state)
alglib_impl::minbleicreport * c_ptr()
void mincgsetprecdiag(mincgstate *state, ae_vector *d, ae_state *_state)
void minbcsetcond(const minbcstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits)
void minqpsetlinearterm(const minqpstate &state, const real_1d_array &b)
minlbfgsreport & operator=(const minlbfgsreport &rhs)
_mincgreport_owner & operator=(const _mincgreport_owner &rhs)
real_1d_array x
void minnlcsetprecexactlowrank(const minnlcstate &state, const ae_int_t updatefreq)
double sasactivelcpenalty1(sactiveset *state, ae_vector *x, ae_state *_state)
void sascorrection(sactiveset *state, ae_vector *x, double *penalty, ae_state *_state)
mincgstate & operator=(const mincgstate &rhs)
void minnsrestartfrom(const minnsstate &state, const real_1d_array &x)
void inexactlbfgspreconditioner(ae_vector *s, ae_int_t n, ae_vector *d, ae_vector *c, ae_matrix *w, ae_int_t k, precbuflbfgs *buf, ae_state *_state)
void minlbfgssetprecdiag(const minlbfgsstate &state, const real_1d_array &d)
ae_int_t & iterationscount
alglib_impl::minlbfgsreport * p_struct
minlmstate & operator=(const minlmstate &rhs)
void minlmsetcond(const minlmstate &state, const double epsx, const ae_int_t maxits)
void _qpdenseaulbuffers_init(void *_p, ae_state *_state)
real_1d_array x
void minnlcoptimize(minnlcstate &state, void(*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr), void(*rep)(const real_1d_array &x, double func, void *ptr)=NULL, void *ptr=NULL)
void minlbfgssetprecscale(minlbfgsstate *state, ae_state *_state)
void minnlcrestartfrom(minnlcstate *state, ae_vector *x, ae_state *_state)
void minbleicsetxrep(minbleicstate *state, ae_bool needxrep, ae_state *_state)
void minnlcsetprecexactrobust(minnlcstate *state, ae_int_t updatefreq, ae_state *_state)
struct alglib_impl::ae_matrix ae_matrix
void mincgresultsbuf(const mincgstate &state, real_1d_array &x, mincgreport &rep)
void minnsrestartfrom(minnsstate *state, ae_vector *x, ae_state *_state)
void _qqpsettings_init(void *_p, ae_state *_state)
void minnlcsetcond(minnlcstate *state, double epsg, double epsf, double epsx, ae_int_t maxits, ae_state *_state)
void _precbuflowrank_clear(void *_p)
void mincgrestartfrom(const mincgstate &state, const real_1d_array &x)
void minqpoptimize(minqpstate *state, ae_state *_state)
void minbcsetprecscale(const minbcstate &state)
void mincgsetstpmax(const mincgstate &state, const double stpmax)
void minnsrequesttermination(minnsstate *state, ae_state *_state)
void minlmresults(minlmstate *state, ae_vector *x, minlmreport *rep, ae_state *_state)
void mincgsetpreclowrankfast(mincgstate *state, ae_vector *d1, ae_vector *c, ae_matrix *v, ae_int_t vcnt, ae_state *_state)
void _sactiveset_clear(void *_p)
void mincgrestartfrom(mincgstate *state, ae_vector *x, ae_state *_state)
void minlbfgssetxrep(minlbfgsstate *state, ae_bool needxrep, ae_state *_state)
void minnsresults(minnsstate *state, ae_vector *x, minnsreport *rep, ae_state *_state)
void minnlcresultsbuf(const minnlcstate &state, real_1d_array &x, minnlcreport &rep)
void _minlbfgsstate_init(void *_p, ae_state *_state)
void minlmsetlc(minlmstate *state, ae_matrix *c, ae_vector *ct, ae_int_t k, ae_state *_state)
void _qqpbuffers_destroy(void *_p)
void mincgresults(mincgstate *state, ae_vector *x, mincgreport *rep, ae_state *_state)
void minnlcsetprecnone(const minnlcstate &state)
void minnlcsetxrep(minnlcstate *state, ae_bool needxrep, ae_state *_state)
void _sactiveset_destroy(void *_p)
void minlmresultsbuf(const minlmstate &state, real_1d_array &x, minlmreport &rep)
void minlbfgscreatef(ae_int_t n, ae_int_t m, ae_vector *x, double diffstep, minlbfgsstate *state, ae_state *_state)
void minlbfgssetprecdefault(const minlbfgsstate &state)
void minlmcreatevgj(ae_int_t n, ae_int_t m, ae_vector *x, minlmstate *state, ae_state *_state)
void mincgsetprecdiagfast(mincgstate *state, ae_vector *d, ae_state *_state)
void minqpsetbc(const minqpstate &state, const real_1d_array &bndl, const real_1d_array &bndu)
ae_int_t & activeconstraints
real_1d_array g
double cqmdebugconstrainedevale(convexquadraticmodel *s, ae_vector *x, ae_state *_state)
void _qpbleicsettings_init_copy(void *_dst, void *_src, ae_state *_state)
void minnlccreate(ae_int_t n, ae_vector *x, minnlcstate *state, ae_state *_state)
void minbleicsetstpmax(minbleicstate *state, double stpmax, ae_state *_state)
void _minbleicstate_destroy(void *_p)
void minlmcreatefgh(const ae_int_t n, const real_1d_array &x, minlmstate &state)
real_2d_array h
void _minnsreport_clear(void *_p)
void minlmcreatefgj(ae_int_t n, ae_int_t m, ae_vector *x, minlmstate *state, ae_state *_state)
void _minlmreport_init_copy(void *_dst, void *_src, ae_state *_state)
void _precbuflbfgs_init_copy(void *_dst, void *_src, ae_state *_state)
minbleicreport(const minbleicreport &rhs)
minqpreport(const minqpreport &rhs)
void sasconstraineddescent(sactiveset *state, ae_vector *g, ae_vector *d, ae_state *_state)
_minnlcreport_owner & operator=(const _minnlcreport_owner &rhs)
void minbcsetstpmax(const minbcstate &state, const double stpmax)
minasastate(const minasastate &rhs)
void _qpdenseaulsettings_clear(void *_p)
void _qqpbuffers_init_copy(void *_dst, void *_src, ae_state *_state)
void mincgsuggeststep(const mincgstate &state, const double stp)
void qpbleiccopysettings(qpbleicsettings *src, qpbleicsettings *dst, ae_state *_state)
minbleicstate(const minbleicstate &rhs)
void minbcsetxrep(const minbcstate &state, const bool needxrep)
void minnssetnlc(const minnsstate &state, const ae_int_t nlec, const ae_int_t nlic)
ae_int_t & inneriterationscount
void minlbfgssetpreccholesky(minlbfgsstate *state, ae_matrix *p, ae_bool isupper, ae_state *_state)
alglib_impl::minqpstate * p_struct
void minbleicsetprecdefault(const minbleicstate &state)
void _minnlcreport_clear(void *_p)
void minnlcresults(const minnlcstate &state, real_1d_array &x, minnlcreport &rep)
void minqpoptimize(const minqpstate &state)
void _precbuflbfgs_init(void *_p, ae_state *_state)
void minasaresultsbuf(minasastate *state, ae_vector *x, minasareport *rep, ae_state *_state)
void snnlssetproblem(snnlssolver *s, ae_matrix *a, ae_vector *b, ae_int_t ns, ae_int_t nd, ae_int_t nr, ae_state *_state)
void _minnsqp_destroy(void *_p)
alglib_impl::minasareport * p_struct
real_1d_array g
void minnlcsetscale(minnlcstate *state, ae_vector *s, ae_state *_state)
void minqpcreate(ae_int_t n, minqpstate *state, ae_state *_state)
void minlmcreatefgj(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlmstate &state)
void _minqpreport_init(void *_p, ae_state *_state)
void minbleicsetlc(minbleicstate *state, ae_matrix *c, ae_vector *ct, ae_int_t k, ae_state *_state)
void minbleicsetgradientcheck(const minbleicstate &state, const double teststep)
void _minnsreport_init_copy(void *_dst, void *_src, ae_state *_state)
void minbcresults(minbcstate *state, ae_vector *x, minbcreport *rep, ae_state *_state)
void mincgsetcgtype(mincgstate *state, ae_int_t cgtype, ae_state *_state)
void minbleicsetprecscale(const minbleicstate &state)
_minlmreport_owner & operator=(const _minlmreport_owner &rhs)
alglib_impl::minlmstate * c_ptr()
void minbleiccreate(const ae_int_t n, const real_1d_array &x, minbleicstate &state)
void minlbfgssetpreclowrankexact(minlbfgsstate *state, ae_vector *d, ae_vector *c, ae_matrix *w, ae_int_t cnt, ae_state *_state)
real_1d_array fi
void minlbfgssetcond(minlbfgsstate *state, double epsg, double epsf, double epsx, ae_int_t maxits, ae_state *_state)
ae_int_t & iterationscount
void minasasetxrep(const minasastate &state, const bool needxrep)
minnsstate & operator=(const minnsstate &rhs)
void _minbcreport_init_copy(void *_dst, void *_src, ae_state *_state)
void cqmsetd(convexquadraticmodel *s, ae_vector *d, double tau, ae_state *_state)
void minasacreate(ae_int_t n, ae_vector *x, ae_vector *bndl, ae_vector *bndu, minasastate *state, ae_state *_state)
void mincgsetscale(const mincgstate &state, const real_1d_array &s)
void minbleiccreatef(ae_int_t n, ae_vector *x, double diffstep, minbleicstate *state, ae_state *_state)
void _minnsstate_init(void *_p, ae_state *_state)
void _minnlcstate_destroy(void *_p)
_minbcstate_owner & operator=(const _minbcstate_owner &rhs)
void _minqpreport_init_copy(void *_dst, void *_src, ae_state *_state)
void minlbfgsrestartfrom(minlbfgsstate *state, ae_vector *x, ae_state *_state)
void _mincgstate_init_copy(void *_dst, void *_src, ae_state *_state)
alglib_impl::minlmreport * c_ptr()
void _minnlcreport_init_copy(void *_dst, void *_src, ae_state *_state)
void snnlssolve(snnlssolver *s, ae_vector *x, ae_state *_state)
void minqpsetlcmixed(const minqpstate &state, const real_2d_array &densec, const integer_1d_array &densect, const ae_int_t densek, const sparsematrix &sparsec, const integer_1d_array &sparsect, const ae_int_t sparsek)
void minlmsetxrep(const minlmstate &state, const bool needxrep)
void mincgcreatef(const ae_int_t n, const real_1d_array &x, const double diffstep, mincgstate &state)
void _minlmstepfinder_init(void *_p, ae_state *_state)
bool minlbfgsiteration(const minlbfgsstate &state)
void minlmcreatev(ae_int_t n, ae_int_t m, ae_vector *x, double diffstep, minlmstate *state, ae_state *_state)
ptrdiff_t ae_int_t
Definition: ap.h:185
void minlmsetcond(minlmstate *state, double epsx, ae_int_t maxits, ae_state *_state)
void minlbfgscreatex(ae_int_t n, ae_int_t m, ae_vector *x, ae_int_t flags, double diffstep, minlbfgsstate *state, ae_state *_state)
void _minbleicstate_clear(void *_p)
minqpstate & operator=(const minqpstate &rhs)
void _minlmstate_clear(void *_p)
void minlmsetgradientcheck(minlmstate *state, double teststep, ae_state *_state)
void minbleicsetcond(minbleicstate *state, double epsg, double epsf, double epsx, ae_int_t maxits, ae_state *_state)
mincgstate(const mincgstate &rhs)
ae_int_t & terminationtype
void mincgsetdrep(mincgstate *state, ae_bool needdrep, ae_state *_state)
void _minqpreport_destroy(void *_p)
mincgreport(const mincgreport &rhs)
void minnlcsetxrep(const minnlcstate &state, const bool needxrep)
void _qpbleicbuffers_init_copy(void *_dst, void *_src, ae_state *_state)
void cqmadx(convexquadraticmodel *s, ae_vector *x, ae_vector *y, ae_state *_state)
void minqpsetalgobleic(const minqpstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits)
void minqpcreate(const ae_int_t n, minqpstate &state)
bool minnsiteration(const minnsstate &state)
void minnlcsetgradientcheck(const minnlcstate &state, const double teststep)
void minasacreate(const ae_int_t n, const real_1d_array &x, const real_1d_array &bndl, const real_1d_array &bndu, minasastate &state)
void minnssetlc(minnsstate *state, ae_matrix *c, ae_vector *ct, ae_int_t k, ae_state *_state)
void sasreactivateconstraints(sactiveset *state, ae_vector *gc, ae_state *_state)
alglib_impl::minqpreport * c_ptr()
void minnlcsetprecinexact(const minnlcstate &state)
void _minbleicreport_init(void *_p, ae_state *_state)
bool minnlciteration(const minnlcstate &state)
void minbcsetprecdiag(minbcstate *state, ae_vector *d, ae_state *_state)
void _minqpstate_clear(void *_p)
void _qpcholeskysettings_destroy(void *_p)
void minqpsetalgoquickqp(const minqpstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxouterits, const bool usenewton)
_minqpreport_owner & operator=(const _minqpreport_owner &rhs)
void sasconstraineddirection(sactiveset *state, ae_vector *d, ae_state *_state)
void minnscreatef(ae_int_t n, ae_vector *x, double diffstep, minnsstate *state, ae_state *_state)
void minbleicsetbarrierwidth(const minbleicstate &state, const double mu)
void _qpcholeskybuffers_init(void *_p, ae_state *_state)
ae_bool derivativecheck(double f0, double df0, double f1, double df1, double f, double df, double width, ae_state *_state)
ae_int_t & terminationtype
virtual ~minbcstate()
void _minlmstepfinder_init_copy(void *_dst, void *_src, ae_state *_state)
void _qpbleicbuffers_init(void *_p, ae_state *_state)
void mincgsetcgtype(const mincgstate &state, const ae_int_t cgtype)
void minnssetscale(minnsstate *state, ae_vector *s, ae_state *_state)
alglib_impl::minbleicstate * c_ptr()
Page URL: http://wiki.math.ethz.ch/bin/view/Concepts/WebHome
21 August 2020
© 2020 Eidgenössische Technische Hochschule Zürich