// NOTE(review): this section is a partially-extracted fragment of a rendered
// C++ header — the leading integers on many lines are the ORIGINAL source
// line numbers fused into the text, and most interior lines are missing.
// The code is therefore not compilable as shown; comments below describe only
// what the visible fragments establish, hedged where lines are missing.
//
// Visible fragment of linearSearch(): a backtracking line search along
// direction `dir` starting from `oldPt`, structured like the classic
// Numerical Recipes `lnsrch` routine — TODO confirm against the full source.
19 const double EPS=3e-8;
43 template <
typename EnergyFunctor>
// Parameters visible in the fragment: grad (gradient at oldPt), dir (search
// direction; rescaled in place if too long), newPt (output trial point),
// maxStep (cap on step length), resCode (status out-parameter).
45 double *grad,
double *dir,
double *newPt,
48 double maxStep,
int &resCode){
// Hard cap on the number of backtracking iterations.
54 const unsigned int MAX_ITER_LINEAR_SEARCH = 1000;
55 double sum=0.0,slope=0.0,test=0.0,
lambda=0.0;
56 double lambda2=0.0,lambdaMin=0.0,tmpLambda=0.0,val2=0.0;
// (loop body missing) — presumably accumulates `sum` from dir, i.e. the step
// length used in the rescale below; verify against the full source.
62 for(
unsigned int i=0;i<dim;i++)
// Rescale the direction so the full step never exceeds maxStep.
68 for(
unsigned int i=0;i<dim;i++)
69 dir[i] *= maxStep/sum;
// slope = dir . grad — directional derivative along the search direction.
75 for(
unsigned int i=0;i<dim;i++){
76 slope += dir[i]*grad[i];
// test = max_i |dir[i]| / max(|oldPt[i]|, 1): relative step size, used to
// derive the smallest lambda worth trying (lambdaMin).
83 for(
unsigned int i=0;i<dim;i++){
84 double temp=fabs(dir[i])/std::max(fabs(oldPt[i]),1.0);
85 if(temp>test) test=temp;
88 lambdaMin = MOVETOL/test;
// Backtracking loop: shrink lambda until the sufficient-decrease test passes.
91 while(it < MAX_ITER_LINEAR_SEARCH){
// Trial point: newPt = oldPt + lambda*dir.
98 for(
unsigned int i=0;i<dim;i++){
99 newPt[i]=oldPt[i]+
lambda*dir[i];
101 newVal = func(newPt);
// Armijo-style sufficient decrease: f(new) - f(old) <= FUNCTOL*lambda*slope.
103 if( newVal-oldVal <= FUNCTOL*
lambda*slope ){
// First backtrack: lambda minimizing a quadratic model of f along the line.
111 tmpLambda = -slope / (2.0*(newVal-oldVal-slope));
// Later backtracks: minimize a cubic model fitted through the two most
// recent (lambda, f) samples (lambda2/val2 hold the previous pair).
113 double rhs1 = newVal-oldVal-
lambda*slope;
114 double rhs2 = val2-oldVal-lambda2*slope;
115 double a = (rhs1/(
lambda*
lambda) - rhs2/(lambda2*lambda2))/
// Degenerate cubic (a == 0, presumably — the guard line is missing): fall
// back to the quadratic minimum.
120 tmpLambda = -slope/(2.0*b);
// disc is the discriminant of the cubic's derivative; the two branches pick
// the numerically stabler root formula.
122 double disc=b*b-3*a*slope;
126 tmpLambda = (-b+sqrt(disc))/(3.0*a);
128 tmpLambda = -slope/(b+sqrt(disc));
// Clamp: the new lambda may be at most half the previous one (clamp body not
// visible — presumably tmpLambda = 0.5*lambda; confirm).
131 if( tmpLambda > 0.5*
lambda ){
142 for(
unsigned int i=0;i<dim;i++){
// Visible fragment of minimize(): a BFGS quasi-Newton minimization driver,
// structured like Numerical Recipes `dfpmin` — TODO confirm against the full
// source.  CLEANUP() frees every heap buffer the routine allocates; note the
// extraction has fused the macro's continuation line and the next template
// header onto a single line below.
// NOTE(review): raw new[]/delete[] buffer management; a modern rewrite would
// use std::vector<double>, but the fragment is left byte-identical here.
#define CLEANUP() { delete [] grad; delete [] dGrad; delete [] hessDGrad;\ 148 delete [] newPos; delete [] xi; delete [] invHessian; } 167 template <
typename EnergyFunctor,
typename GradientFunctor>
170 unsigned int &numIters,
173 GradientFunctor gradFunc,
175 unsigned int maxIts=MAXITS){
179 double sum,maxStep,fp;
181 double *grad,*dGrad,*hessDGrad;
// Working buffers: gradient, gradient difference, H*dGrad, trial point
// (newPos), search direction (xi), and the dim x dim inverse-Hessian
// approximation (row-major, indexed via itab = i*dim).
185 grad =
new double[dim];
186 dGrad =
new double[dim];
187 hessDGrad =
new double[dim];
188 newPos =
new double[dim];
189 xi =
new double[dim];
190 invHessian =
new double[dim*dim];
// Initialize the inverse-Hessian approximation to the identity matrix.
197 memset(invHessian,0,dim*dim*
sizeof(
double));
198 for(
unsigned int i=0;i<dim;i++){
199 unsigned int itab=i*dim;
201 invHessian[itab+i]=1.0;
// Also accumulates |pos|^2 in the same loop for the step cap below.
204 sum += pos[i]*pos[i];
// Maximum step length scales with both |pos| and the problem dimension.
207 maxStep = MAXSTEP * std::max(sqrt(sum),static_cast<double>(dim));
// Main BFGS iteration loop.
210 for(
unsigned int iter=1;iter<=maxIts;iter++){
215 linearSearch(dim,pos,fp,grad,xi,newPos,funcVal,func,maxStep,status);
// Convergence test on the relative coordinate change: xi becomes the actual
// step taken, test its largest relative component.
223 for(
unsigned int i=0;i<dim;i++){
224 xi[i] = newPos[i]-pos[i];
226 double temp=fabs(xi[i])/std::max(fabs(pos[i]),1.0);
227 if(temp>test) test=temp;
// Re-evaluate the gradient at the new point; gradScale presumably feeds the
// gradient-convergence threshold via `term` — verify against full source.
237 double gradScale=gradFunc(pos,grad);
241 double term=std::max(funcVal*gradScale,1.0);
242 for(
unsigned int i=0;i<dim;i++){
243 double temp=fabs(grad[i])*std::max(fabs(pos[i]),1.0);
244 test=std::max(test,temp);
// dGrad previously held the old gradient; it now holds the difference
// grad_new - grad_old used by the BFGS update.
245 dGrad[i] = grad[i]-dGrad[i];
// BFGS update bookkeeping:
//   hessDGrad = H * dGrad,  fac = dGrad.xi,  fae = dGrad.(H*dGrad).
259 double fac=0,fae=0,sumDGrad=0,sumXi=0;
260 for(
unsigned int i=0;i<dim;i++){
261 unsigned int itab=i*dim;
263 for(
unsigned int j=0;j<dim;j++){
264 hessDGrad[i] += invHessian[itab+j]*dGrad[j];
267 fac += dGrad[i]*xi[i];
268 fae += dGrad[i]*hessDGrad[i];
269 sumDGrad += dGrad[i]*dGrad[i];
270 sumXi += xi[i]*xi[i];
// Skip the update when fac is too small (dGrad nearly orthogonal to xi) to
// avoid division blow-up; EPS is the tolerance declared in this file.
272 if(fac > sqrt(EPS*sumDGrad*sumXi)){
274 double fad = 1.0/fae;
// NOTE(review): the NR formulation inverts fac (fac = 1/fac) before this
// point; that line (273?) is missing from the extraction — confirm it exists
// in the full source, otherwise the update below would be wrong.
275 for(
unsigned int i=0;i<dim;i++){
// dGrad is recycled here to hold the BFGS correction vector.
276 dGrad[i] = fac*xi[i] - fad*hessDGrad[i];
// Symmetric rank-two BFGS update of the inverse Hessian; only the upper
// triangle is computed and then mirrored.
278 for(
unsigned int i=0;i<dim;i++){
279 unsigned int itab=i*dim;
280 for(
unsigned int j=i;j<dim;j++){
281 invHessian[itab+j] += fac*xi[i]*xi[j] -
282 fad*hessDGrad[i]*hessDGrad[j] +
283 fae*dGrad[i]*dGrad[j];
284 invHessian[j*dim+i] = invHessian[itab+j];
// Next search direction: xi = -H * grad (xi presumably zeroed on the missing
// line before the inner loop — confirm).
289 for(
unsigned int i=0;i<dim;i++){
290 unsigned int itab=i*dim;
292 for(
unsigned int j=0;j<dim;j++){
293 xi[i] -= invHessian[itab+j]*grad[j];
const double MAXSTEP
Default maximum step size in the minimizer.
#define CHECK_INVARIANT(expr, mess)
int minimize(unsigned int dim, double *pos, double gradTol, unsigned int &numIters, double &funcVal, EnergyFunctor func, GradientFunctor gradFunc, double funcTol=TOLX, unsigned int maxIts=MAXITS)
Do a BFGS minimization of a function.
const double MOVETOL
Default tolerance for x changes in the minimizer.
const double lambda
scaling factor for rBO correction
const double FUNCTOL
Default tolerance for function convergence in the minimizer.
void linearSearch(unsigned int dim, double *oldPt, double oldVal, double *grad, double *dir, double *newPt, double &newVal, EnergyFunctor func, double maxStep, int &resCode)
Do a Quasi-Newton minimization along a line.
const double TOLX
Default direction vector tolerance in the minimizer.
#define PRECONDITION(expr, mess)
const double EPS
Default gradient tolerance in the minimizer.
const int MAXITS
Default maximum number of iterations.