mmin_conf.h
/*
  -------------------------------------------------------------------

  Copyright (C) 2006-2017, Andrew W. Steiner

  This file is part of O2scl.

  O2scl is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 3 of the License, or
  (at your option) any later version.

  O2scl is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with O2scl. If not, see <http://www.gnu.org/licenses/>.

  -------------------------------------------------------------------
*/
#ifndef O2SCL_GSL_MMIN_CONF_H
#define O2SCL_GSL_MMIN_CONF_H

/** \file mmin_conf.h
    \brief File defining \ref o2scl::mmin_conf
*/

#include <gsl/gsl_blas.h>
#include <gsl/gsl_multimin.h>

#include <boost/numeric/ublas/vector.hpp>
#include <boost/numeric/ublas/matrix.hpp>

#include <o2scl/mmin.h>
#include <o2scl/misc.h>
#include <o2scl/cblas.h>

#ifndef DOXYGEN_NO_O2NS
namespace o2scl {
#endif

  /** \brief Base minimization routines for mmin_conf and mmin_conp

      This class is used by the mmin_conf and mmin_conp
      minimizers to perform the line minimization along a specified
      direction. It is not intended for a casual end-user.

      Default template arguments
      - \c func_t - \ref multi_funct11
      - \c vec_t - \ref boost::numeric::ublas::vector<double>
      - \c dfunc_t - \ref grad_funct11
      - \c auto_grad_t - \ref gradient<func_t>
      - \c def_auto_grad_t - \ref gradient_gsl<func_t>

      \comment
      Doxygen has had trouble parsing this template, so I have
      simplified it slightly for the documentation below.
      \endcomment
  */
  template<class func_t = multi_funct11,
           class vec_t = boost::numeric::ublas::vector<double>,
           class dfunc_t = grad_funct11,
           class auto_grad_t = gradient<multi_funct11,
                                        boost::numeric::ublas::vector<double> >,
           class def_auto_grad_t =
           gradient_gsl<multi_funct11,boost::numeric::ublas::vector<double> > >
  class mmin_gsl_base : public mmin_base<func_t,func_t,vec_t> {

#ifndef DOXYGEN_INTERNAL

  protected:

    typedef boost::numeric::ublas::vector<double> ubvector;

    /// User-specified function
    func_t *func;

    /// User-specified gradient
    dfunc_t *grad;

    /// Automatic gradient object
    auto_grad_t *agrad;

    /// If true, a gradient has been specified
    bool grad_given;

    /// Memory size
    size_t dim;

    /// Take a step
    void take_step(const vec_t &x, const vec_t &px,
                   double stepx, double lambda, vec_t &x1x,
                   vec_t &dx) {

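      // The two daxpy() calls below set dx = -stepx*lambda*px and
      // x1x = x+dx, so the trial point is x1x = x - stepx*lambda*px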
      for(size_t i=0;i<this->dim;i++) dx[i]=0.0;
      o2scl_cblas::daxpy(-stepx*lambda,this->dim,px,dx);

      for(size_t i=0;i<this->dim;i++) x1x[i]=x[i];
      o2scl_cblas::daxpy(1.0,this->dim,dx,x1x);

      return;
    }

    /** \brief Line minimization

        Do a line minimization in the region (xa,fa) (xc,fc) to find
        an intermediate (xb,fb) satisfying fa > fb < fc. Choose an
        initial xb based on parabolic interpolation.
    */
    void intermediate_point(const vec_t &x, const vec_t &px,
                            double lambda, double pg, double stepa,
                            double stepc, double fa, double fc,
                            vec_t &x1x, vec_t &dx, vec_t &gradient,
                            double *stepx, double *f) {
      double stepb, fb;

      bool trial_failed;

      // This looks like an infinite loop, but will eventually
      // stop because stepb will eventually become zero. This
      // error condition is handled later in iterate().

      do {

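        // Parabolic interpolation: model the function along the line
        // as f(s) = fa - g*s + c*s^2, where g = |pg*lambda| is the
        // slope magnitude at s=0 and c is fixed by requiring
        // f(stepc) = fc. With u = g*stepc, the vertex of this
        // parabola lies at stepb = 0.5*stepc*u/((fc-fa)+u).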
        double u = fabs(pg * lambda * stepc);
        stepb = 0.5 * stepc * u / ((fc - fa) + u);

        take_step(x, px, stepb, lambda, x1x, dx);

        // Immediately exit if trial point is the same as the
        // initial point (as updated in GSL 1.15)
        bool vector_equal=true;
        for(size_t i=0;i<dim;i++) {
          if (x[i]!=x1x[i]) vector_equal=false;
        }
        if (vector_equal) {

          *stepx=0.0;
          *f=fa;

          // Evaluate the gradient
          if (grad_given) {
            (*grad)(dim,x1x,gradient);
          } else {
            (*agrad)(dim,x1x,gradient);
          }

          return;
        }

        // Evaluate the function
        fb=(*func)(dim,x1x);

        trial_failed=false;
        if (fb >= fa && stepb > 0.0) {
          // [GSL] downhill step failed, reduce step-size and try again
          fc = fb;
          stepc = stepb;
          trial_failed=true;
        }

      } while (trial_failed);

      *stepx = stepb;
      *f = fb;

      // Evaluate the gradient
      if (grad_given) {
        (*grad)(dim,x1x,gradient);
      } else {
        (*agrad)(dim,x1x,gradient);
      }

      return;
    }

    /** \brief Perform the minimization

        Starting at (x0, f0) move along the direction p to find a
        minimum f(x0 - lambda * p), returning the new point x1 =
        x0-lambda*p, f1=f(x1) and g1 = grad(f) at x1.
    */
    void min(const vec_t &x, const vec_t &xp, double lambda,
             double stepa, double stepb, double stepc, double fa,
             double fb, double fc, double xtol, vec_t &x1x,
             vec_t &dx1x, vec_t &x2x, vec_t &dx2x, vec_t &gradient,
             double *xstep, double *f, double *gnorm_u) {

      double u = stepb;
      double v = stepa;
      double w = stepc;
      double fu = fb;
      double fv = fa;
      double fw = fc;

      double old2 = fabs(w - v);
      double old1 = fabs(v - u);

      double stepm, fm, pg, gnorm1;

      int xiter = 0;

      for(size_t i=0;i<dim;i++) {
        x2x[i]=x1x[i];
        dx2x[i]=dx1x[i];
      }

      *f = fb;
      *xstep = stepb;
      *gnorm_u = o2scl_cblas::dnrm2(dim,gradient);

      while (true) {

        xiter++;

        if (xiter > nmaxiter) {
          // Exceeded maximum number of iterations
          return;
        }

        {
          double dw = w - u;
          double dv = v - u;
          double du = 0.0;
          double e1 = ((fv - fu) * dw * dw + (fu - fw) * dv * dv);
          double e2 = 2.0 * ((fv - fu) * dw + (fu - fw) * dv);

          if (e2 != 0.0) {
            du = e1 / e2;
          }

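          // Safeguard: accept the interpolated step du only if it
          // stays strictly inside the current bracket and is smaller
          // than half of the second-to-last step; otherwise fall
          // back to a golden-section-like step into the longer
          // sub-interval (0.38 is approximately 2-phi).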
          if (du > 0.0 && du < (stepc - stepb) && fabs(du) < 0.5 * old2) {
            stepm = u + du;
          } else if (du < 0.0 && du > (stepa - stepb) &&
                     fabs(du) < 0.5 * old2) {
            stepm = u + du;
          } else if ((stepc - stepb) > (stepb - stepa)) {
            stepm = 0.38 * (stepc - stepb) + stepb;
          } else {
            stepm = stepb - 0.38 * (stepb - stepa);
          }
        }

        take_step(x, xp, stepm, lambda, x1x, dx1x);

        fm=(*func)(dim,x1x);

        if (fm > fb) {

          if (fm < fv) {
            w = v;
            v = stepm;
            fw = fv;
            fv = fm;
          } else if (fm < fw) {
            w = stepm;
            fw = fm;
          }
          if (stepm < stepb) {
            stepa = stepm;
            fa = fm;
          } else {
            stepc = stepm;
            fc = fm;
          }

        } else if (fm <= fb) {

          old2 = old1;
          old1 = fabs(u - stepm);
          w = v;
          v = u;
          u = stepm;
          fw = fv;
          fv = fu;
          fu = fm;

          for(size_t i=0;i<dim;i++) {
            x2x[i]=x1x[i];
            dx2x[i]=dx1x[i];
          }

          if (grad_given) {
            (*grad)(dim,x1x,gradient);
          } else {
            (*agrad)(dim,x1x,gradient);
          }

          pg=o2scl_cblas::ddot(dim,xp,gradient);
          gnorm1 = o2scl_cblas::dnrm2(dim,gradient);

          *f = fm;
          *xstep = stepm;
          *gnorm_u = gnorm1;

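          // Since lambda = dir/pnorm in the caller, pg*lambda/gnorm1
          // is the cosine of the angle between the search direction
          // and the new gradient; the line minimization succeeds
          // once the two are orthogonal to within xtol.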
          if (fabs(pg * lambda / gnorm1) < xtol) {
            // [GSL] SUCCESS
            return;
          }

          if (stepm < stepb) {
            stepc = stepb;
            fc = fb;
            stepb = stepm;
            fb = fm;
          } else {
            stepa = stepb;
            fa = fb;
            stepb = stepm;
            fb = fm;
          }

        }

      }

      return;
    }

#endif

  public:

    /// Stepsize for finite-differencing (default \f$ 10^{-4} \f$)
    double deriv_h;

    /// Maximum iterations for line minimization (default 10)
    int nmaxiter;

    /// Default automatic \gradient object
    def_auto_grad_t def_grad;

    mmin_gsl_base() {
      deriv_h=1.0e-4;
      nmaxiter=10;
      grad_given=false;
      agrad=&def_grad;
    }

    /// Set the function
    int base_set(func_t &ufunc, auto_grad_t &u_def_grad) {
      func=&ufunc;
      agrad=&u_def_grad;
      grad_given=false;
      return 0;
    }

    /// Set the function and the \gradient
    int base_set_de(func_t &ufunc, dfunc_t &udfunc) {
      func=&ufunc;
      grad=&udfunc;
      grad_given=true;
      return 0;
    }

    /// Allocate memory
    int base_allocate(size_t nn) {
      dim=nn;
      return 0;
    }

    /// Clear allocated memory
    int base_free() {
      dim=0;
      return 0;
    }

  };

  /** \brief Multidimensional minimization by the Fletcher-Reeves
      conjugate \gradient algorithm (GSL)

      This class performs multidimensional minimization by the
      Fletcher-Reeves conjugate \gradient algorithm (GSL). The
      functions mmin() and mmin_de() minimize a given function until
      the \gradient is smaller than the value of mmin::tol_rel
      (which defaults to \f$ 10^{-4} \f$).

      This class has a high-level interface using mmin() or mmin_de()
      which automatically performs the memory allocation and
      minimization, or a GSL-like interface using allocate(), free(),
      iterate() and set() or set_de().

      See an example for the usage of this class
      in \ref ex_mmin_sect .

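      A minimal sketch of the high-level interface, using an
      illustrative two-dimensional quadratic (for the full example,
      see \ref ex_mmin_sect):
      \code
      // Assumes the headers above and 'using namespace o2scl;'
      typedef boost::numeric::ublas::vector<double> ubvector;

      // An illustrative function with minimum at (2,-3)
      double quad(size_t nv, const ubvector &x) {
        return (x[0]-2.0)*(x[0]-2.0)+(x[1]+3.0)*(x[1]+3.0);
      }

      mmin_conf<> mc;
      multi_funct11 mf=quad;
      ubvector x(2);
      x[0]=1.0; x[1]=1.0;
      double fmin;
      // On success, x holds the location of the minimum
      // and fmin the minimum value
      mc.mmin(2,x,fmin,mf);
      \endcode
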
      Default template arguments
      - \c func_t - \ref multi_funct11
      - \c vec_t - \ref boost::numeric::ublas::vector<double>
      - \c dfunc_t - \ref grad_funct11
      - \c auto_grad_t - \ref gradient<func_t>
      - \c def_auto_grad_t - \ref gradient_gsl<func_t>

      Note that the state variable \c max_iter has not been included
      here, because it was not really used in the original GSL code
      for these minimizers.
  */
  template<class func_t = multi_funct11,
           class vec_t = boost::numeric::ublas::vector<double>,
           class dfunc_t = grad_funct11,
           class auto_grad_t = gradient<multi_funct11,
                                        boost::numeric::ublas::vector<double> >,
           class def_auto_grad_t =
           gradient_gsl<multi_funct11,boost::numeric::ublas::vector<double> > >
  class mmin_conf :
    public mmin_gsl_base<func_t,vec_t,dfunc_t,auto_grad_t,def_auto_grad_t> {

#ifndef DOXYGEN_INTERNAL

  protected:

    /// \name The original variables from the GSL state structure
    //@{
    /// Iteration number
    int iter;
    /// Stepsize
    double step;
    /// Tolerance
    double tol;
    /// Trial point for the line minimization
    vec_t x1;
    /// Step associated with the trial point \c x1
    vec_t dx1;
    /// Best point found by the most recent line minimization
    vec_t x2;
    /// Norm of the search direction \c p
    double pnorm;
    /// Current search direction
    vec_t p;
    /// Norm of the previous gradient \c g0
    double g0norm;
    /// Previous gradient
    vec_t g0;
    //@}

    /// \name Store the arguments to set() so we can use them for iterate()
    //@{
    /// Proposed minimum
    vec_t ugx;
    /// Gradient
    vec_t ugg;
    /// Proposed step
    vec_t udx;
    /// Current minimum function value
    double it_min;
    //@}

#endif

  public:

    mmin_conf() {
      lmin_tol=1.0e-4;
      this->tol_rel=1.0e-4;
      step_size=0.01;
    }

    virtual ~mmin_conf() {}

    /// Tolerance for the line minimization (default \f$ 10^{-4} \f$)
    double lmin_tol;

    /// Size of the initial step (default 0.01)
    double step_size;

    /// \name GSL-like lower level interface
    //@{
    /// Perform an iteration
    virtual int iterate() {

      if (this->dim==0) {
        O2SCL_ERR2("Memory not allocated in ",
                   "mmin_conf::iterate().",exc_efailed);
      }

      vec_t &x=ugx;
      vec_t &gradient=ugg;
      vec_t &dx=udx;

      double fa = it_min, fb, fc;
      double dir;
      double stepa = 0.0, stepb, stepc = step;

      double g1norm;
      double pg;

      if (pnorm == 0.0 || g0norm == 0.0) {
        for(size_t i=0;i<this->dim;i++) dx[i]=0.0;
        O2SCL_CONV2_RET("Either pnorm or g0norm vanished ",
                        "in mmin_conf::iterate().",
                        exc_enoprog,this->err_nonconv);
      }

      /* [GSL] Determine which direction is downhill, +p or -p */

      pg=o2scl_cblas::ddot(this->dim,p,gradient);

      dir = (pg >= 0.0) ? +1.0 : -1.0;

      /* [GSL] Compute new trial point at x_c= x - step * p, where p is the
         current direction */

      this->take_step(x, p, stepc, dir / pnorm, x1, dx);

      /* [GSL] Evaluate function and gradient at new point xc */

      fc=(*this->func)(this->dim,x1);

      if (fc < fa) {

        /* [GSL] Success, reduced the function value */
        step = stepc * 2.0;
        it_min = fc;
        for(size_t i=0;i<this->dim;i++) {
          x[i]=x1[i];
        }

        if (this->grad_given) {
          (*this->grad)(this->dim,x1,gradient);
        } else {
          (*this->agrad)(this->dim,x1,gradient);
        }

        return success;
      }

      /* [GSL] Do a line minimization in the region (xa,fa) (xc,fc) to find an
         intermediate (xb,fb) satisfying fa > fb < fc. Choose an initial
         xb based on parabolic interpolation
      */
      this->intermediate_point(x,p,dir / pnorm,pg,stepa,stepc,fa,fc,x1,
                               dx1,gradient,&stepb,&fb);

      if (stepb == 0.0) {
        O2SCL_CONV2_RET("Variable stepb vanished ",
                        "in mmin_conf::iterate().",
                        exc_enoprog,this->err_nonconv);
      }

      this->min(x,p,dir / pnorm,stepa,stepb,stepc,fa,fb,fc,tol,
                x1,dx1,x2,dx,gradient,&step,&it_min,&g1norm);

      for(size_t i=0;i<this->dim;i++) x[i]=x2[i];

      /* [GSL] Choose a new conjugate direction for the next step */
      iter=(iter+1) % this->dim;

      if (iter==0) {

        for(size_t i=0;i<this->dim;i++) p[i]=gradient[i];
        pnorm=g1norm;

      } else {

        /* [GSL] p' = g1 - beta * p */
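        // Fletcher-Reeves coefficient beta_FR = (|g1|/|g0|)^2,
        // stored here as beta = -beta_FR, so the dscal() and daxpy()
        // calls below compute p' = g1 + beta_FR*p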
        double beta=-pow(g1norm/g0norm, 2.0);
        o2scl_cblas::dscal(-beta,this->dim,p);
        o2scl_cblas::daxpy(1.0,this->dim,gradient,p);
        pnorm=o2scl_cblas::dnrm2(this->dim,p);
      }

      g0norm=g1norm;
      for(size_t i=0;i<this->dim;i++) {
        g0[i]=gradient[i];
      }

      return success;
    }

    /** \brief Allocate the memory
     */
    virtual int allocate(size_t n) {

      x1.resize(n);
      dx1.resize(n);
      x2.resize(n);
      p.resize(n);
      g0.resize(n);
      udx.resize(n);
      ugg.resize(n);
      ugx.resize(n);

      this->base_allocate(n);

      return success;

    }

    /// Free the allocated memory
    virtual int free() {

      this->base_free();

      return 0;
    }

    /// Reset the minimizer to use the current point as a new starting point
    int restart() {
      iter=0;
      return success;
    }

    /// Set the function and initial guess
    virtual int set(vec_t &x, double u_step_size, double tol_u,
                    func_t &ufunc) {

      if (this->dim==0) {
        O2SCL_ERR2("Memory not allocated in ",
                   "mmin_conf::set().",exc_efailed);
      }

      this->base_set(ufunc,*this->agrad);
      for(size_t i=0;i<this->dim;i++) ugx[i]=x[i];

      iter=0;
      step=u_step_size;
      tol=tol_u;

      // Evaluate the function and its gradient
      it_min=ufunc(this->dim,x);
      this->agrad->set_function(ufunc);
      (*this->agrad)(this->dim,x,ugg);

      /* [GSL] Use the gradient as the initial direction */

      for(size_t i=0;i<this->dim;i++) {
        p[i]=ugg[i];
        g0[i]=ugg[i];
      }

      double gnorm=o2scl_cblas::dnrm2(this->dim,ugg);

      pnorm=gnorm;
      g0norm=gnorm;

      return success;
    }

    /// Set the function, the gradient, and the initial guess
    virtual int set_de(vec_t &x, double u_step_size, double tol_u,
                       func_t &ufunc, dfunc_t &udfunc) {

      if (this->dim==0) {
        O2SCL_ERR2("Memory not allocated in ",
                   "mmin_conf::set_de().",exc_efailed);
      }

      this->base_set_de(ufunc,udfunc);
      for(size_t i=0;i<this->dim;i++) ugx[i]=x[i];

      iter=0;
      step=u_step_size;
      tol=tol_u;

      // Evaluate the function and its gradient
      it_min=ufunc(this->dim,x);
      udfunc(this->dim,x,ugg);

      /* Use the gradient as the initial direction */

      for(size_t i=0;i<this->dim;i++) {
        p[i]=ugg[i];
        g0[i]=ugg[i];
      }

      double gnorm=o2scl_cblas::dnrm2(this->dim,ugg);

      pnorm=gnorm;
      g0norm=gnorm;

      return success;
    }
    //@}

    /// \name Basic usage
    //@{
    /** \brief Calculate the minimum \c fmin of \c ufunc w.r.t. the
        array \c xx of size \c nn.
    */
    virtual int mmin(size_t nn, vec_t &xx, double &fmin,
                     func_t &ufunc) {

      if (nn==0) {
        O2SCL_ERR2("Tried to minimize over zero variables ",
                   "in mmin_conf::mmin().",exc_einval);
      }

      int xiter=0, status;

      allocate(nn);

      set(xx,step_size,lmin_tol,ufunc);

      do {

        xiter++;

        status=iterate();

        if (status) {
          break;
        }

        // Equivalent to gsl_multimin_test_gradient with
        // additional code to print out present iteration

        double norm=o2scl_cblas::dnrm2(nn,ugg);

        if(this->verbose>0) {
          this->print_iter(nn,ugx,it_min,xiter,
                           norm,this->tol_rel,type());
        }

        if (norm<this->tol_rel) status=success;
        else status=gsl_continue;

      } while (status==gsl_continue && xiter < this->ntrial);

      for(size_t i=0;i<nn;i++) xx[i]=ugx[i];
      fmin=it_min;

      free();
      this->last_ntrial=xiter;

      if (status==gsl_continue && xiter==this->ntrial) {
        std::string err="Exceeded max number of iterations, "+
          dtos(this->ntrial)+", in mmin_conf::mmin().";
        O2SCL_CONV_RET(err.c_str(),exc_emaxiter,this->err_nonconv);
      }

      return status;
    }

    /** \brief Calculate the minimum \c fmin of \c ufunc w.r.t. the
        array \c xx of size \c nn, using the gradient \c udfunc.
    */
    virtual int mmin_de(size_t nn, vec_t &xx, double &fmin,
                        func_t &ufunc, dfunc_t &udfunc) {

      if (nn==0) {
        O2SCL_ERR2("Tried to minimize over zero variables ",
                   "in mmin_conf::mmin_de().",exc_einval);
      }

      int xiter=0, status;

      allocate(nn);

      set_de(xx,step_size,lmin_tol,ufunc,udfunc);

      do {
        xiter++;

        status=iterate();

        if (status) {
          break;
        }

        // Equivalent to gsl_multimin_test_gradient with
        // additional code to print out present iteration

        double norm=o2scl_cblas::dnrm2(nn,ugg);

        if(this->verbose>0) {
          this->print_iter(nn,ugx,it_min,xiter,
                           norm,this->tol_rel,type());
        }

        if (norm<this->tol_rel) status=success;
        else status=gsl_continue;

      } while (status==gsl_continue && xiter < this->ntrial);

      for(size_t i=0;i<nn;i++) xx[i]=ugx[i];
      fmin=it_min;

      free();
      this->last_ntrial=xiter;

      if (status==gsl_continue && xiter==this->ntrial) {
        std::string err="Exceeded max number of iterations, "+
          dtos(this->ntrial)+", in mmin_conf::mmin_de().";
        O2SCL_CONV_RET(err.c_str(),exc_emaxiter,this->err_nonconv);
      }

      return status;
    }
    //@}

    /// Return string denoting type ("mmin_conf")
    virtual const char *type() { return "mmin_conf"; }

#ifndef DOXYGEN_INTERNAL

  private:

    // Forbid copying
    mmin_conf<func_t,vec_t,dfunc_t,auto_grad_t,def_auto_grad_t>
    (const mmin_conf<func_t,vec_t,dfunc_t,auto_grad_t,def_auto_grad_t>&);
    mmin_conf<func_t,vec_t,dfunc_t,auto_grad_t,def_auto_grad_t>& operator=
    (const mmin_conf<func_t,vec_t,dfunc_t,auto_grad_t,def_auto_grad_t>&);

#endif

  };

#ifndef DOXYGEN_NO_O2NS
}
#endif

#endif