mmin_conp.h
Go to the documentation of this file.
1 /*
2  -------------------------------------------------------------------
3 
4  Copyright (C) 2006-2017, Andrew W. Steiner
5 
6  This file is part of O2scl.
7 
8  O2scl is free software; you can redistribute it and/or modify
9  it under the terms of the GNU General Public License as published by
10  the Free Software Foundation; either version 3 of the License, or
11  (at your option) any later version.
12 
13  O2scl is distributed in the hope that it will be useful,
14  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  GNU General Public License for more details.
17 
18  You should have received a copy of the GNU General Public License
19  along with O2scl. If not, see <http://www.gnu.org/licenses/>.
20 
21  -------------------------------------------------------------------
22 */
23 #ifndef O2SCL_MMIN_CONP_H
24 #define O2SCL_MMIN_CONP_H
25 
26 /** \file mmin_conp.h
27  \brief File defining \ref o2scl::mmin_conp
28 */
29 #include <gsl/gsl_blas.h>
30 #include <gsl/gsl_multimin.h>
31 #include <o2scl/mmin_conf.h>
32 
33 #ifndef DOXYGEN_NO_O2NS
34 namespace o2scl {
35 #endif
36 
37  /** \brief Multidimensional minimization by the Polak-Ribiere
38  conjugate gradient algorithm (GSL)
39 
40  The functions mmin() and mmin_de() min a given function
41  until the gradient is smaller than the value of mmin::tol_rel
42  (which defaults to \f$ 10^{-4} \f$ ).
43 
44  See an example for the usage of this class in
45  \ref ex_mmin_sect .
46  */
47  template<class func_t = multi_funct11,
49  class dfunc_t = grad_funct11,
50  class auto_grad_t = gradient<multi_funct11,
52  class def_auto_grad_t =
53  gradient_gsl<multi_funct11,boost::numeric::ublas::vector<double> > >
54  class mmin_conp :
55  public mmin_conf<func_t,vec_t,dfunc_t,auto_grad_t,def_auto_grad_t> {
56 
57  public:
58 
59  mmin_conp() : mmin_conf<func_t,vec_t,dfunc_t,auto_grad_t,
60  def_auto_grad_t>() {
61  }
62 
63  /// Perform an iteration
64  virtual int iterate() {
65 
66  if (this->dim==0) {
67  O2SCL_ERR("Memory not allocated in iterate().",exc_efailed);
68  }
69 
70  vec_t &x=this->ugx;
71  vec_t &gradient=this->ugg;
72  vec_t &dx=this->udx;
73 
74  double fa = this->it_min, fb, fc;
75  double dir;
76  double stepa = 0.0, stepb, stepc=this->step;
77 
78  double g1norm;
79  double pg;
80 
81  if (this->pnorm == 0.0 || this->g0norm == 0.0) {
82  for(size_t i=0;i<this->dim;i++) dx[i]=0.0;
83  O2SCL_CONV2_RET("Either pnorm or g0norm vanished in ",
84  "in mmin_conp::iterate().",
85  exc_enoprog,this->err_nonconv);
86  return 0;
87  }
88 
89  /* Determine which direction is downhill, +p or -p */
90 
91  pg=o2scl_cblas::ddot(this->dim,this->p,gradient);
92 
93  dir = (pg >= 0.0) ? +1.0 : -1.0;
94 
95  /* Compute new trial point at x_c= x - step * p, where p is the
96  current direction */
97 
98  this->take_step(x, this->p, stepc, dir / this->pnorm, this->x1, dx);
99 
100  /* Evaluate function and gradient at new point xc */
101 
102  fc=(*this->func)(this->dim,this->x1);
103 
104  if (fc < fa) {
105 
106  /* Success, reduced the function value */
107  this->step = stepc * 2.0;
108  this->it_min = fc;
109  for(size_t i=0;i<this->dim;i++) {
110  x[i]=this->x1[i];
111  }
112 
113  if (this->grad_given) {
114  (*this->grad)(this->dim,this->x1,gradient);
115  } else {
116  (*this->agrad)(this->dim,this->x1,gradient);
117  }
118 
119  return success;
120  }
121 
122  /* Do a line minimisation in the region (xa,fa) (xc,fc) to find an
123  intermediate (xb,fb) satisifying fa > fb < fc. Choose an initial
124  xb based on parabolic interpolation */
125 
126  this->intermediate_point(x, this->p, dir / this->pnorm, pg,
127  stepa, stepc, fa, fc, this->x1, this->dx1,
128  gradient, &stepb, &fb);
129 
130  if (stepb == 0.0) {
131  O2SCL_CONV2_RET("Variable stepb vanished in mmin_conp::",
132  "iterate().",exc_enoprog,this->err_nonconv);
133  }
134 
135  this->min(x,this->p,dir / this->pnorm,stepa,stepb,stepc, fa,
136  fb, fc, this->tol, this->x1, this->dx1, this->x2,
137  dx, gradient, &(this->step), &(this->it_min), &g1norm);
138 
139  for(size_t i=0;i<this->dim;i++) x[i]=this->x2[i];
140 
141  /* Choose a new conjugate direction for the next step */
142 
143  this->iter = (this->iter + 1) % this->dim;
144 
145  if (this->iter == 0) {
146  for(size_t i=0;i<this->dim;i++) this->p[i]=gradient[i];
147  this->pnorm = g1norm;
148  } else {
149  /* p' = g1 - beta * p */
150  double g0g1, beta;
151 
152  /* g0' = g0 - g1 */
153  o2scl_cblas::daxpy(-1.0,this->dim,gradient,this->g0);
154  /* g1g0 = (g0-g1).g1 */
155  g0g1=o2scl_cblas::ddot(this->dim,this->g0,gradient);
156  /* beta = -((g1 - g0).g1)/(g0.g0) */
157  beta = g0g1 / (this->g0norm*this->g0norm);
158 
159  o2scl_cblas::dscal(-beta,this->dim,this->p);
160  o2scl_cblas::daxpy(1.0,this->dim,gradient,this->p);
161  this->pnorm = o2scl_cblas::dnrm2(this->dim,this->p);
162  }
163 
164  this->g0norm = g1norm;
165  for(size_t i=0;i<this->dim;i++) {
166  this->g0[i]=gradient[i];
167  }
168 
169  return success;
170  }
171 
172  /// Return string denoting type("mmin_conp")
173  virtual const char *type() { return "mmin_conp";}
174 
175 #ifndef DOXYGEN_INTERNAL
176 
177  private:
178 
182  (const mmin_conp<func_t,vec_t,dfunc_t,auto_grad_t,def_auto_grad_t>&);
183 
184 #endif
185 
186  };
187 
188 #ifndef DOXYGEN_NO_O2NS
189 }
190 #endif
191 
192 #endif
Class for automatically computing gradients [abstract base].
Definition: mmin.h:54
The main O2scl namespace.
Definition: anneal.h:42
double it_min
Desc.
Definition: mmin_conf.h:449
vec_t g0
Desc.
Definition: mmin_conf.h:437
bool grad_given
If true, a gradient has been specified.
Definition: mmin_conf.h:88
vec_t ugg
Gradient.
Definition: mmin_conf.h:445
void min(const vec_t &x, const vec_t &xp, double lambda, double stepa, double stepb, double stepc, double fa, double fb, double fc, double xtol, vec_t &x1x, vec_t &dx1x, vec_t &x2x, vec_t &dx2x, vec_t &gradient, double *xstep, double *f, double *gnorm_u)
Perform the minimization.
Definition: mmin_conf.h:186
vec_t p
Desc.
Definition: mmin_conf.h:433
vec_t dx1
Desc.
Definition: mmin_conf.h:427
void intermediate_point(const vec_t &x, const vec_t &px, double lambda, double pg, double stepa, double stepc, double fa, double fc, vec_t &x1x, vec_t &dx, vec_t &gradient, double *stepx, double *f)
Line minimization.
Definition: mmin_conf.h:113
func_t * func
User-specified function.
Definition: mmin_conf.h:79
#define O2SCL_CONV2_RET(d, d2, n, b)
Set an error and return the error value, two-string version.
Definition: err_hnd.h:298
size_t dim
Memory size.
Definition: mmin_conf.h:91
double g0norm
Desc.
Definition: mmin_conf.h:435
generic failure
Definition: err_hnd.h:61
int iter
Iteration number.
Definition: mmin_conf.h:419
double dnrm2(const size_t N, const vec_t &X)
Compute the norm of the vector X.
Definition: cblas_base.h:156
bool err_nonconv
If true, call the error handler if the routine does not "converge".
Definition: mmin.h:209
virtual int iterate()
Perform an iteration.
Definition: mmin_conp.h:64
iteration is not making progress toward solution
Definition: err_hnd.h:105
double step
Stepsize.
Definition: mmin_conf.h:421
vec_t x2
Desc.
Definition: mmin_conf.h:429
auto_grad_t * agrad
Automatic gradient object.
Definition: mmin_conf.h:85
double ddot(const size_t N, const vec_t &X, const vec2_t &Y)
Compute the dot product of the vectors X and Y.
Definition: cblas_base.h:123
vec_t udx
Proposed step.
Definition: mmin_conf.h:447
#define O2SCL_ERR(d, n)
Set an error with message d and code n.
Definition: err_hnd.h:273
vec_t ugx
Proposed minimum.
Definition: mmin_conf.h:443
void dscal(const double alpha, const size_t N, vec_t &X)
Scale the vector X by alpha, i.e. X = alpha*X.
Definition: cblas_base.h:190
std::function< int(size_t, boost::numeric::ublas::vector< double > &, boost::numeric::ublas::vector< double > &)> grad_funct11
Gradient function typedef.
Definition: mmin.h:42
void daxpy(const double alpha, const size_t N, const vec_t &X, vec2_t &Y)
Compute Y = alpha*X + Y.
Definition: cblas_base.h:98
Multidimensional minimization by the Polak-Ribiere conjugate gradient algorithm (GSL) ...
Definition: mmin_conp.h:54
vec_t x1
Desc.
Definition: mmin_conf.h:425
virtual const char * type()
Return string denoting type("mmin_conp")
Definition: mmin_conp.h:173
double pnorm
Desc.
Definition: mmin_conf.h:431
std::function< double(size_t, const boost::numeric::ublas::vector< double > &)> multi_funct11
Multi-dimensional function typedef.
Definition: multi_funct.h:45
Multidimensional minimization by the Fletcher-Reeves conjugate gradient algorithm (GSL) ...
Definition: mmin_conf.h:409
double tol
Tolerance.
Definition: mmin_conf.h:423
Success.
Definition: err_hnd.h:47
dfunc_t * grad
User-specified gradient.
Definition: mmin_conf.h:82
void take_step(const vec_t &x, const vec_t &px, double stepx, double lambda, vec_t &x1x, vec_t &dx)
Take a step.
Definition: mmin_conf.h:94

Documentation generated with Doxygen. Provided under the GNU Free Documentation License (see License Information).