go home Home | Main Page | Modules | Namespace List | Class Hierarchy | Alphabetical List | Data Structures | File List | Namespace Members | Data Fields | Globals | Related Pages
itkQuasiNewtonLBFGSOptimizer.h
Go to the documentation of this file.
1 /*======================================================================
2 
3  This file is part of the elastix software.
4 
5  Copyright (c) University Medical Center Utrecht. All rights reserved.
6  See src/CopyrightElastix.txt or http://elastix.isi.uu.nl/legal.php for
7  details.
8 
9  This software is distributed WITHOUT ANY WARRANTY; without even
10  the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
11  PURPOSE. See the above copyright notices for more information.
12 
13 ======================================================================*/
14 
15 #ifndef __itkQuasiNewtonLBFGSOptimizer_h
16 #define __itkQuasiNewtonLBFGSOptimizer_h
17 
19 #include "itkLineSearchOptimizer.h"
20 #include <vector>
21 
22 namespace itk
23 {
55 {
56 public:
57 
60  typedef SmartPointer< Self > Pointer;
61  typedef SmartPointer< const Self > ConstPointer;
62 
63  itkNewMacro( Self );
65 
72 
73  typedef Array< double > RhoType;
74  typedef std::vector< ParametersType > SType;
75  typedef std::vector< DerivativeType > YType;
76  typedef Array< double > DiagonalMatrixType;
78 
80 
81  typedef enum {
90 
91  virtual void StartOptimization( void );
92 
93  virtual void ResumeOptimization( void );
94 
95  virtual void StopOptimization( void );
96 
98  itkGetConstMacro( CurrentIteration, unsigned long );
99  itkGetConstMacro( CurrentValue, MeasureType );
100  itkGetConstReferenceMacro( CurrentGradient, DerivativeType );
101  itkGetConstMacro( InLineSearch, bool );
102  itkGetConstReferenceMacro( StopCondition, StopConditionType );
103  itkGetConstMacro( CurrentStepLength, double );
104 
106  itkSetObjectMacro( LineSearchOptimizer, LineSearchOptimizerType );
107  itkGetObjectMacro( LineSearchOptimizer, LineSearchOptimizerType );
108 
110  itkGetConstMacro( MaximumNumberOfIterations, unsigned long );
111  itkSetClampMacro( MaximumNumberOfIterations, unsigned long,
113 
119  itkGetConstMacro( GradientMagnitudeTolerance, double );
120  itkSetMacro( GradientMagnitudeTolerance, double );
121 
125  itkSetClampMacro( Memory, unsigned int, 0, NumericTraits< unsigned int >::max() );
126  itkGetConstMacro( Memory, unsigned int );
127 
128 protected:
129 
132 
133  // \todo: should be implemented
134  void PrintSelf( std::ostream & os, Indent indent ) const {}
135 
138  unsigned long m_CurrentIteration;
140  bool m_Stop;
142 
145 
149 
150  unsigned int m_Point;
151  unsigned int m_PreviousPoint;
152  unsigned int m_Bound;
153 
154  itkSetMacro( InLineSearch, bool );
155 
160  virtual void ComputeDiagonalMatrix( DiagonalMatrixType & diag_H0 );
161 
168  virtual void ComputeSearchDirection(
169  const DerivativeType & gradient,
170  ParametersType & searchDir );
171 
176  virtual void LineSearch(
177  const ParametersType searchDir,
178  double & step,
179  ParametersType & x,
180  MeasureType & f,
181  DerivativeType & g );
182 
185  virtual void StoreCurrentPoint(
186  const ParametersType & step,
187  const DerivativeType & grad_dif );
188 
193  virtual bool TestConvergence( bool firstLineSearchDone );
194 
195 private:
196 
197  QuasiNewtonLBFGSOptimizer( const Self & ); // purposely not implemented
198  void operator=( const Self & ); // purposely not implemented
199 
203  unsigned int m_Memory;
204 
205 };
206 
207 } // end namespace itk
208 
216 /* LIMITED MEMORY BFGS METHOD FOR LARGE SCALE OPTIMIZATION */
217 /* JORGE NOCEDAL */
218 /* *** July 1990 *** */
219 
220 /* This subroutine solves the unconstrained minimization problem */
221 
222 /* min F(x), x= (x1,x2,...,xN), */
223 
224 /* using the limited memory BFGS method. The routine is especially */
225 /* effective on problems involving a large number of variables. In */
226 /* a typical iteration of this method an approximation Hk to the */
227 /* inverse of the Hessian is obtained by applying M BFGS updates to */
228 /* a diagonal matrix Hk0, using information from the previous M steps. */
229 /* The user specifies the number M, which determines the amount of */
230 /* storage required by the routine. The user may also provide the */
231 /* diagonal matrices Hk0 if not satisfied with the default choice. */
232 /* The algorithm is described in "On the limited memory BFGS method */
233 /* for large scale optimization", by D. Liu and J. Nocedal, */
234 /* Mathematical Programming B 45 (1989) 503-528. */
235 
236 /* The user is required to calculate the function value F and its */
237 /* gradient G. In order to allow the user complete control over */
238 /* these computations, reverse communication is used. The routine */
239 /* must be called repeatedly under the control of the parameter */
240 /* IFLAG. */
241 
242 /* The steplength is determined at each iteration by means of the */
243 /* line search routine MCVSRCH, which is a slight modification of */
244 /* the routine CSRCH written by More' and Thuente. */
245 
246 /* The calling statement is */
247 
248 /* CALL LBFGS(N,M,X,F,G,DIAGCO,DIAG,IPRINT,EPS,XTOL,W,IFLAG) */
249 
250 /* where */
251 
252 /* N is an INTEGER variable that must be set by the user to the */
253 /* number of variables. It is not altered by the routine. */
254 /* Restriction: N>0. */
255 
256 /* M is an INTEGER variable that must be set by the user to */
257 /* the number of corrections used in the BFGS update. It */
258 /* is not altered by the routine. Values of M less than 3 are */
259 /* not recommended; large values of M will result in excessive */
260 /* computing time. 3<= M <=7 is recommended. Restriction: M>0. */
261 
262 /* X is a DOUBLE PRECISION array of length N. On initial entry */
263 /* it must be set by the user to the values of the initial */
264 /* estimate of the solution vector. On exit with IFLAG=0, it */
265 /* contains the values of the variables at the best point */
266 /* found (usually a solution). */
267 
268 /* F is a DOUBLE PRECISION variable. Before initial entry and on */
269 /* a re-entry with IFLAG=1, it must be set by the user to */
270 /* contain the value of the function F at the point X. */
271 
272 /* G is a DOUBLE PRECISION array of length N. Before initial */
273 /* entry and on a re-entry with IFLAG=1, it must be set by */
274 /* the user to contain the components of the gradient G at */
275 /* the point X. */
276 
277 /* DIAGCO is a LOGICAL variable that must be set to .TRUE. if the */
278 /* user wishes to provide the diagonal matrix Hk0 at each */
279 /* iteration. Otherwise it should be set to .FALSE., in which */
280 /* case LBFGS will use a default value described below. If */
281 /* DIAGCO is set to .TRUE. the routine will return at each */
282 /* iteration of the algorithm with IFLAG=2, and the diagonal */
283 /* matrix Hk0 must be provided in the array DIAG. */
284 
285 /* DIAG is a DOUBLE PRECISION array of length N. If DIAGCO=.TRUE., */
286 /* then on initial entry or on a re-entry with IFLAG=2, DIAG */
287 /* must be set by the user to contain the values of the */
288 /* diagonal matrix Hk0. Restriction: all elements of DIAG */
289 /* must be positive. */
290 
291 /* IPRINT is an INTEGER array of length two which must be set by the */
292 /* user. */
293 
294 /* IPRINT(1) specifies the frequency of the output: */
295 /* IPRINT(1) < 0 : no output is generated, */
296 /* IPRINT(1) = 0 : output only at first and last iteration, */
297 /* IPRINT(1) > 0 : output every IPRINT(1) iterations. */
298 
299 /* IPRINT(2) specifies the type of output generated: */
300 /* IPRINT(2) = 0 : iteration count, number of function */
301 /* evaluations, function value, norm of the */
302 /* gradient, and steplength, */
303 /* IPRINT(2) = 1 : same as IPRINT(2)=0, plus vector of */
304 /* variables and gradient vector at the */
305 /* initial point, */
306 /* IPRINT(2) = 2 : same as IPRINT(2)=1, plus vector of */
307 /* variables, */
308 /* IPRINT(2) = 3 : same as IPRINT(2)=2, plus gradient vector.*/
309 
310 /* EPS is a positive DOUBLE PRECISION variable that must be set by */
311 /* the user, and determines the accuracy with which the solution*/
312 /* is to be found. The subroutine terminates when */
313 
314 /* ||G|| < EPS max(1,||X||), */
315 
316 /* where ||.|| denotes the Euclidean norm. */
317 
318 /* XTOL is a positive DOUBLE PRECISION variable that must be set by */
319 /* the user to an estimate of the machine precision (e.g. */
320 /* 10**(-16) on a SUN station 3/60). The line search routine will*/
321 /* terminate if the relative width of the interval of uncertainty*/
322 /* is less than XTOL. */
323 
324 /* W is a DOUBLE PRECISION array of length N(2M+1)+2M used as */
325 /* workspace for LBFGS. This array must not be altered by the */
326 /* user. */
327 
328 /* IFLAG is an INTEGER variable that must be set to 0 on initial entry*/
329 /* to the subroutine. A return with IFLAG<0 indicates an error, */
330 /* and IFLAG=0 indicates that the routine has terminated without*/
331 /* detecting errors. On a return with IFLAG=1, the user must */
332 /* evaluate the function F and gradient G. On a return with */
333 /* IFLAG=2, the user must provide the diagonal matrix Hk0. */
334 
335 /* The following negative values of IFLAG, detecting an error, */
336 /* are possible: */
337 
338 /* IFLAG=-1 The line search routine MCSRCH failed. The */
339 /* parameter INFO provides more detailed information */
340 /* (see also the documentation of MCSRCH): */
341 
342 /* INFO = 0 IMPROPER INPUT PARAMETERS. */
343 
344 /* INFO = 2 RELATIVE WIDTH OF THE INTERVAL OF */
345 /* UNCERTAINTY IS AT MOST XTOL. */
346 
347 /* INFO = 3 MORE THAN 20 FUNCTION EVALUATIONS WERE */
348 /* REQUIRED AT THE PRESENT ITERATION. */
349 
350 /* INFO = 4 THE STEP IS TOO SMALL. */
351 
352 /* INFO = 5 THE STEP IS TOO LARGE. */
353 
354 /* INFO = 6 ROUNDING ERRORS PREVENT FURTHER PROGRESS.*/
355 /* THERE MAY NOT BE A STEP WHICH SATISFIES */
356 /* THE SUFFICIENT DECREASE AND CURVATURE */
357 /* CONDITIONS. TOLERANCES MAY BE TOO SMALL. */
358 
359 /* IFLAG=-2 The i-th diagonal element of the diagonal inverse */
360 /* Hessian approximation, given in DIAG, is not */
361 /* positive. */
362 
363 /* IFLAG=-3 Improper input parameters for LBFGS (N or M are */
364 /* not positive). */
365 
366 /* ON THE DRIVER: */
367 
368 /* The program that calls LBFGS must contain the declaration: */
369 
370 /* EXTERNAL LB2 */
371 
372 /* LB2 is a BLOCK DATA that defines the default values of several */
373 /* parameters described in the COMMON section. */
374 
375 /* COMMON: */
376 
377 /* The subroutine contains one common area, which the user may wish to */
378 /* reference: */
379 
380 /* awf added stpawf */
381 
382 /* MP is an INTEGER variable with default value 6. It is used as the */
383 /* unit number for the printing of the monitoring information */
384 /* controlled by IPRINT. */
385 
386 /* LP is an INTEGER variable with default value 6. It is used as the */
387 /* unit number for the printing of error messages. This printing */
388 /* may be suppressed by setting LP to a non-positive value. */
389 
390 /* GTOL is a DOUBLE PRECISION variable with default value 0.9, which */
391 /* controls the accuracy of the line search routine MCSRCH. If the */
392 /* function and gradient evaluations are inexpensive with respect */
393 /* to the cost of the iteration (which is sometimes the case when */
394 /* solving very large problems) it may be advantageous to set GTOL */
395 /* to a small value. A typical small value is 0.1. Restriction: */
396 /* GTOL should be greater than 1.D-04. */
397 
398 /* STPMIN and STPMAX are non-negative DOUBLE PRECISION variables which */
399 /* specify lower and upper bounds for the step in the line search. */
400 /* Their default values are 1.D-20 and 1.D+20, respectively. These */
401 /* values need not be modified unless the exponents are too large */
402 /* for the machine being used, or unless the problem is extremely */
403 /* badly scaled (in which case the exponents should be increased). */
404 
405 /* MACHINE DEPENDENCIES */
406 
407 /* The only variables that are machine-dependent are XTOL, */
408 /* STPMIN and STPMAX. */
409 
410 /* GENERAL INFORMATION */
411 
412 /* Other routines called directly: DAXPY, DDOT, LB1, MCSRCH */
413 
414 /* Input/Output : No input; diagnostic messages on unit MP and */
415 /* error messages on unit LP. */
416 
417 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
418 
419 #endif //#ifndef __itkQuasiNewtonLBFGSOptimizer_h
virtual bool TestConvergence(bool firstLineSearchDone)
Superclass::ParametersType ParametersType
virtual void ComputeDiagonalMatrix(DiagonalMatrixType &diag_H0)
SmartPointer< Self > Pointer
Superclass::DerivativeType DerivativeType
ITK version of the lbfgs algorithm ...
Superclass::CostFunctionType CostFunctionType
ScaledSingleValuedNonLinearOptimizer Superclass
void operator=(const Self &)
std::vector< ParametersType > SType
virtual void StartOptimization(void)
virtual void StoreCurrentPoint(const ParametersType &step, const DerivativeType &grad_dif)
int max(int a, int b)
virtual void ResumeOptimization(void)
LineSearchOptimizerPointer m_LineSearchOptimizer
A base class for LineSearch optimizers.
std::vector< DerivativeType > YType
virtual void LineSearch(const ParametersType searchDir, double &step, ParametersType &x, MeasureType &f, DerivativeType &g)
Superclass::ScaledCostFunctionType ScaledCostFunctionType
LineSearchOptimizerType::Pointer LineSearchOptimizerPointer
void PrintSelf(std::ostream &os, Indent indent) const
virtual void ComputeSearchDirection(const DerivativeType &gradient, ParametersType &searchDir)
virtual void StopOptimization(void)


Generated on 27-04-2014 for elastix by doxygen 1.8.6 elastix logo