mmin_constr_pgrad.h
/*
  -------------------------------------------------------------------

  Copyright (C) 2006-2017, Andrew W. Steiner

  This file is part of O2scl.

  O2scl is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 3 of the License, or
  (at your option) any later version.

  O2scl is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with O2scl. If not, see <http://www.gnu.org/licenses/>.

  -------------------------------------------------------------------
*/
/*--------------------------------------------------------------------------*
 * Open Optimization Library - Projected Gradient Method
 *
 * pgrad/pgrad.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Iara da Cunha
 * since: June, 29, 2004
 *
 * $Id: pgrad.c,v 1.14 2005/05/19 19:37:07 biloti Exp $
 *--------------------------------------------------------------------------*/
#ifndef O2SCL_OOL_MMIN_PGRAD_H
#define O2SCL_OOL_MMIN_PGRAD_H

/** \file mmin_constr_pgrad.h
    \brief File defining \ref o2scl::mmin_constr_pgrad
*/

#include <gsl/gsl_math.h>

#include <o2scl/multi_funct.h>
#include <o2scl/mmin_constr.h>

#ifndef DOXYGEN_NO_O2NS
namespace o2scl {
#endif

  /** \brief Constrained minimization by the projected gradient method (OOL)

      This is a simple extension of the steepest descent algorithm to
      constrained minimization. At each step, a line search is
      performed along the projected gradient direction, subject to
      the specified constraints.

      This algorithm is likely not ideal for most problems and is
      provided mostly for demonstration and educational purposes. It
      is based on the implementation of \ref Kelley99 in OOL. (A
      hedged usage sketch is given after the end of this listing.)

      Default template arguments
      - \c func_t - \ref multi_funct11
      - \c dfunc_t - \ref grad_funct11
      - \c vec_t - \ref boost::numeric::ublas::vector < double >

      \future Replace the explicit norm computation below with
      the more accurate dnrm2 from linalg.
      \future Replace the generic variable 'tol' with 'tolf'
      or 'tolx' from \ref o2scl::mmin_base.
  */
  template<class func_t=multi_funct11, class dfunc_t=grad_funct11,
    class vec_t=boost::numeric::ublas::vector<double> >
    class mmin_constr_pgrad :
    public mmin_constr<func_t,dfunc_t,ool_hfunct11,vec_t> {

#ifndef DOXYGEN_INTERNAL

    protected:

    /// A convenient typedef for the unused Hessian product type
    typedef ool_hfunct11 hfunc_t;

    /// Temporary vector
    vec_t xx;

    /// Project into feasible region
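    /// by clamping each component of \c xt to the box
    /// \f$ [L_i, U_i] \f$ given by the constraint vectors \c L and \c U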
    int proj(vec_t &xt) {
      size_t ii, n=this->dim;

      for(ii=0;ii<n;ii++) {
        double dtmp;
        if (xt[ii]<this->U[ii]) dtmp=xt[ii];
        else dtmp=this->U[ii];
        if (this->L[ii]>dtmp) xt[ii]=this->L[ii];
        else xt[ii]=dtmp;
        //xt[ii]=GSL_MAX(this->L[ii],GSL_MIN(xt[ii],this->U[ii]));
      }
      return 0;
    }

    /// Line search
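    /// along the projected negative gradient direction. Backtracking
    /// continues until the projected trial point
    /// \f$ x_{+} = P(x - t \nabla f(x)) \f$ satisfies the sufficient
    /// decrease (Armijo) condition
    /// \f$ f(x_{+}) \le f(x) - (\alpha/t) \| x_{+} - x \|^2 \f$.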
    int line_search() {

      double t, tnew, fx, fxx, dif2, gtd;

      fx=this->f;

      tnew=1;

      do {

        t = tnew;
        /* xx = x - t df */
        o2scl::vector_copy(this->dim,this->x,xx);
        for(size_t i=0;i<this->dim;i++) xx[i]-=t*this->gradient[i];
        proj(xx);

        fxx=(*this->func)(this->dim,xx);

        o2scl::vector_copy(this->dim,xx,this->dx);
        for(size_t i=0;i<this->dim;i++) this->dx[i]-=this->x[i];

        dif2=0.0;
        for(size_t i=0;i<this->dim;i++) dif2+=this->dx[i]*this->dx[i];

        gtd=0.0;
        for(size_t i=0;i<this->dim;i++) {
          gtd+=this->gradient[i]*this->dx[i];
        }

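        // New trial step length from a quadratic-interpolation-style
        // formula; it is safeguarded into [sigma1*t, sigma2*t] below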
        tnew = - t * t * gtd / (2*(fxx - this->f - gtd));

        double arg1=sigma1*t;
        double arg2;
        if (sigma2*t<tnew) arg2=sigma2*t;
        else arg2=tnew;
        if (arg1>arg2) tnew=arg1;
        else tnew=arg2;

        // sufficient decrease condition (Armijo)
      } while(fxx > fx - (alpha/t) * dif2 );

      o2scl::vector_copy(this->dim,xx,this->x);
      this->f = fxx;

      (*this->dfunc)(this->dim,this->x,this->gradient);

      return 0;
    }

#endif

    public:

    mmin_constr_pgrad() {
      fmin=-1.0e99;
      tol=1.0e-4;
      alpha=1.0e-4;
      sigma1=0.1;
      sigma2=0.9;
      this->ntrial=1000;
    }

    /** \brief Minimum function value (default \f$ -10^{99} \f$)

        If the function value falls below this value, then the
        algorithm assumes that the function is unbounded from below
        and exits.
    */
    double fmin;

    /// Tolerance on the infinity norm (default \f$ 10^{-4} \f$)
    double tol;

    /** \brief Constant for the sufficient decrease condition
        (default \f$ 10^{-4} \f$)
    */
    double alpha;

    /// Lower bound for the step-length reduction (default 0.1)
    double sigma1;

    /// Upper bound for the step-length reduction (default 0.9)
    double sigma2;

    /// Allocate memory
    virtual int allocate(const size_t n) {
      if (this->dim>0) free();
      xx.resize(n);
      return mmin_constr<func_t,dfunc_t,ool_hfunct11,vec_t>::allocate(n);
    }

    /// Free previously allocated memory
    virtual int free() {
      xx.clear();
      return 0;
    }

    /// Set the function, the initial guess, and the parameters
    virtual int set(func_t &fn, dfunc_t &dfn, vec_t &init) {

      mmin_constr<func_t,dfunc_t,ool_hfunct11,vec_t>::set(fn,dfn,init);

      // Turn initial guess into a feasible point
      proj(this->x);

      // Evaluate function and gradient
      this->f=(*this->func)(this->dim,this->x);
      (*this->dfunc)(this->dim,this->x,this->gradient);

      return 0;
    }

    /// Restart the minimizer
    virtual int restart() {
      // Turn x into a feasible point
      proj(this->x);

      // Evaluate function and gradient
      this->f=(*this->func)(this->dim,this->x);
      (*this->dfunc)(this->dim,this->x,this->gradient);

      return 0;
    }

    /// Perform an iteration
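    /// (a single projected-gradient step: one line_search() call,
    /// after which \c size is set to the infinity norm of
    /// \f$ P(x - \nabla f(x)) - x \f$)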
    virtual int iterate() {
      double t;

      line_search();

      /* Infinity norm of g1 = d <- [P(x - g) - x] */
      o2scl::vector_copy(this->dim,this->x,xx);
      for(size_t i=0;i<this->dim;i++) {
        xx[i]-=this->gradient[i];
      }
      proj(xx);
      for(size_t i=0;i<this->dim;i++) xx[i]-=this->x[i];

      double maxval=fabs(xx[0]);
      for(size_t i=1;i<this->dim;i++) {
        if (fabs(xx[i])>maxval) {
          maxval=fabs(xx[i]);
        }
      }
      this->size=fabs(maxval);

      return success;
    }

    /// See if we're finished
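    /// Optimality is declared when \c size has dropped to \c tol or
    /// below, or when the function value is at or below \c fmin.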
    virtual int is_optimal() {
      if (this->size>tol && this->f>fmin) {
        return gsl_continue;
      }
      return success;
    }

    /// Return string denoting type ("mmin_constr_pgrad")
    const char *type() { return "mmin_constr_pgrad"; }

#ifndef DOXYGEN_INTERNAL

    private:

    mmin_constr_pgrad<func_t,dfunc_t,vec_t>
    (const mmin_constr_pgrad<func_t,dfunc_t,vec_t>&);
    mmin_constr_pgrad<func_t,dfunc_t,vec_t>& operator=
    (const mmin_constr_pgrad<func_t,dfunc_t,vec_t>&);

#endif

  };

#ifndef DOXYGEN_NO_O2NS
}
#endif

#endif

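The header above only defines the class. The sketch below shows one way the minimizer might be driven by hand, using the default template arguments and only the members defined in this file (allocate(), set(), iterate(), is_optimal(), and the ntrial member set in the constructor). It is a minimal, unverified sketch: the bound-setting call set_constraints() and the include path <o2scl/mmin_constr_pgrad.h> are assumptions based on the surrounding O2scl conventions rather than on this file, and in practice the mmin() driver inherited from mmin_base would normally be used instead of the explicit loop.

// Hedged usage sketch (not part of the header above). It exercises
// only the members defined in this file; set_constraints() and the
// include path are assumptions, as noted in the text above.

#include <boost/numeric/ublas/vector.hpp>
#include <o2scl/mmin_constr_pgrad.h>

typedef boost::numeric::ublas::vector<double> ubvector;

// Quadratic objective with unconstrained minimum at (2,2); the box
// constraint below pushes the solution to the corner (1,1)
double quad(size_t nv, const ubvector &x) {
  return (x[0]-2.0)*(x[0]-2.0)+(x[1]-2.0)*(x[1]-2.0);
}

// Gradient of the objective, matching the grad_funct11 signature
int quad_grad(size_t nv, ubvector &x, ubvector &g) {
  g[0]=2.0*(x[0]-2.0);
  g[1]=2.0*(x[1]-2.0);
  return 0;
}

int main(void) {

  o2scl::mmin_constr_pgrad<> mp;

  o2scl::multi_funct11 mf=quad;
  o2scl::grad_funct11 gf=quad_grad;

  ubvector init(2), low(2), high(2);
  init[0]=0.5; init[1]=0.5;
  low[0]=0.0; low[1]=0.0;
  high[0]=1.0; high[1]=1.0;

  mp.allocate(2);
  // Assumed name for supplying the bounds L and U; check mmin_constr.h
  mp.set_constraints(2,low,high);
  mp.set(mf,gf,init);

  // Iterate until is_optimal() reports success or ntrial is exhausted
  int it=0;
  do {
    mp.iterate();
    it++;
  } while (mp.is_optimal()!=o2scl::success && it<mp.ntrial);

  return 0;
}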