ConjugateGradient.cpp

/******************************************************************************/
/*                                                                            */
/*   C O N J U G A T E   G R A D I E N T   C L A S S                          */
/*                                                                            */
/*   Roberto Lopez                                                            */
/*   International Center for Numerical Methods in Engineering (CIMNE)        */
/*   Technical University of Catalonia (UPC)                                  */
/*   Barcelona, Spain                                                         */
/*   E-mail: rlopez@cimne.upc.edu                                             */
/*                                                                            */
/******************************************************************************/

#include "ConjugateGradient.h"

#include <iostream>
#include <fstream>
#include <algorithm>
#include <functional>
#include <limits>
#include <math.h>
#include <stdlib.h> // for rand() and RAND_MAX, used by the general constructor
#include <time.h>

namespace Purple
{

// GENERAL CONSTRUCTOR

/// General constructor. It creates a conjugate gradient optimization
/// algorithm object associated with an objective function object.
/// It also initializes the class members to their default values:
///
/// Initial argument: Random point within the objective function domain.
///
/// Optimization operators:
/// <ul>
/// <li> Search direction method = Polak-Ribiere;
/// <li> Optimal step size method = Brent method;
/// </ul>
///
/// Optimization parameters:
/// <ul>
/// <li> First step size: 1.0e-3.
/// <li> Optimal step size tolerance: 1.0e-3.
/// </ul>
///
/// Stopping criteria:
/// <ul>
/// <li> Evaluation goal: -1.0e69.
/// <li> Gradient norm goal: 0.0.
/// <li> Maximum time: 1.0e6.
/// <li> Maximum number of iterations: 1000.
/// </ul>
///
/// User stuff:
/// <ul>
/// <li> Warning step size: 1000.
/// <li> Show period: 25.
/// </ul>
///
/// @param newObjectiveFunction: Pointer to an objective function object.
///
/// @see ObjectiveFunction.
/// @see OptimizationAlgorithm.

ConjugateGradient::ConjugateGradient(ObjectiveFunction* newObjectiveFunction)
: OptimizationAlgorithm(newObjectiveFunction)
{
   // Optimization operators

   searchDirectionMethod = PolakRibiere;
   optimalStepSizeMethod = BrentMethod;

   // Initial argument

   int numberOfVariables = objectiveFunction->getNumberOfVariables();

   Vector<double> lowerBound = objectiveFunction->getLowerBound();
   Vector<double> upperBound = objectiveFunction->getUpperBound();

   Vector<double> newInitialArgument(numberOfVariables, 0.0);

   for(int i = 0; i < numberOfVariables; i++)
   {
      double random = (double)rand()/(RAND_MAX+1.0);

      newInitialArgument[i]
      = lowerBound[i] + (upperBound[i] - lowerBound[i])*random;
   }

   initialArgument = newInitialArgument;

   // Optimization parameters

   firstStepSize = 1.0e-3;

   optimalStepSizeTolerance = 1.0e-3;

   // Stopping criteria

   evaluationGoal = -1.0e69;
   gradientNormGoal = 0.0;
   maximumTime = 1.0e6;
   maximumNumberOfIterations = 1000;

   // User stuff

   warningStepSize = 1000.0;
   showPeriod = 25;
}
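
// A minimal usage sketch (illustrative, not part of the original file):
// "MyFunction" stands for a hypothetical class derived from ObjectiveFunction.
//
//    MyFunction myFunction;
//
//    ConjugateGradient conjugateGradient(&myFunction);
//    conjugateGradient.setMaximumNumberOfIterations(100);
//
//    Vector<double> minimalArgument = conjugateGradient.getMinimalArgument();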


// DEFAULT CONSTRUCTOR

/// Default constructor. It creates a conjugate gradient optimization algorithm
/// object not associated with any objective function object.
/// It also initializes the class members to their default values:
///
/// Optimization operators:
/// <ul>
/// <li> Search direction method = Polak-Ribiere;
/// <li> Optimal step size method = Brent method;
/// </ul>
///
/// Optimization parameters:
/// <ul>
/// <li> First step size: 1.0e-3.
/// <li> Optimal step size tolerance: 1.0e-3.
/// </ul>
///
/// Stopping criteria:
/// <ul>
/// <li> Evaluation goal: -1.0e69.
/// <li> Gradient norm goal: 0.0.
/// <li> Maximum time: 1.0e6.
/// <li> Maximum number of iterations: 1000.
/// </ul>
///
/// User stuff:
/// <ul>
/// <li> Warning step size: 1000.
/// <li> Show period: 25.
/// </ul>
///
/// @see OptimizationAlgorithm.

ConjugateGradient::ConjugateGradient(void) : OptimizationAlgorithm()
{
   // Optimization operators

   searchDirectionMethod = PolakRibiere;
   optimalStepSizeMethod = BrentMethod;

   // Optimization parameters

   firstStepSize = 1.0e-3;
   optimalStepSizeTolerance = 1.0e-3;

   // Stopping criteria

   evaluationGoal = -1.0e69;
   gradientNormGoal = 0.0;
   maximumTime = 1.0e6;
   maximumNumberOfIterations = 1000;

   // User stuff

   warningStepSize = 1000.0;
   showPeriod = 25;
}


// DESTRUCTOR

/// Destructor.

ConjugateGradient::~ConjugateGradient(void)
{

}


// METHODS

// SearchDirectionMethod getSearchDirectionMethod(void) method

/// This method returns the search direction method used for optimization.
///
/// @see getFletcherReevesSearchDirection(Vector<double>, Vector<double>, Vector<double>).
/// @see getPolakRibiereSearchDirection(Vector<double>, Vector<double>, Vector<double>).

ConjugateGradient::SearchDirectionMethod
ConjugateGradient::getSearchDirectionMethod(void)
{
   return(searchDirectionMethod);
}


// OptimalStepSizeMethod getOptimalStepSizeMethod(void) method

/// This method returns the optimal step size method used for optimization.
///
/// @see getGoldenSectionOptimalStepSize(double, double, Vector<double>, Vector<double>).
/// @see getBrentMethodOptimalStepSize(double, double, Vector<double>, Vector<double>).

ConjugateGradient::OptimalStepSizeMethod
ConjugateGradient::getOptimalStepSizeMethod(void)
{
   return(optimalStepSizeMethod);
}


// Vector<double> getInitialArgument(void) method

/// This method returns the initial objective function argument to be used by
/// the conjugate gradient method for optimization.

Vector<double> ConjugateGradient::getInitialArgument(void)
{
   return(initialArgument);
}


// double getGradientNormGoal(void) method

/// This method returns the goal value for the norm of the objective function
/// gradient.
/// This is used as a stopping criterion when optimizing a function.

double ConjugateGradient::getGradientNormGoal(void)
{
   return(gradientNormGoal);
}


// int getMaximumNumberOfIterations(void) method

/// This method returns the maximum number of iterations to be performed by the
/// conjugate gradient method during the optimization process.
/// This is used as a stopping criterion when optimizing an objective function.

int ConjugateGradient::getMaximumNumberOfIterations(void)
{
   return(maximumNumberOfIterations);
}


// double getFirstStepSize(void) method

/// This method returns the initial step size for line search
/// in the first iteration of the conjugate gradient.
///
/// @see getGoldenSectionOptimalStepSize(double, double, Vector<double>, Vector<double>).
/// @see getBrentMethodOptimalStepSize(double, double, Vector<double>, Vector<double>).

double ConjugateGradient::getFirstStepSize(void)
{
   return(firstStepSize);
}


// double getOptimalStepSizeTolerance(void) method

/// This method returns the tolerance value used in line search.
///
/// @see getGoldenSectionOptimalStepSize(double, double, Vector<double>, Vector<double>).
/// @see getBrentMethodOptimalStepSize(double, double, Vector<double>, Vector<double>).

double ConjugateGradient::getOptimalStepSizeTolerance(void)
{
   return(optimalStepSizeTolerance);
}


// double getWarningStepSize(void) method

/// This method returns the step size value at which a warning message is
/// written to the screen during line search.
///
/// @see getGoldenSectionOptimalStepSize(double, double, Vector<double>, Vector<double>).
/// @see getBrentMethodOptimalStepSize(double, double, Vector<double>, Vector<double>).

double ConjugateGradient::getWarningStepSize(void)
{
   return(warningStepSize);
}


// int getShowPeriod(void) method

/// This method returns the number of iterations between two prints of
/// progress information to the screen during optimization.

int ConjugateGradient::getShowPeriod(void)
{
   return(showPeriod);
}


// void setSearchDirectionMethod(SearchDirectionMethod) method

/// This method sets a new search direction method to be used for optimization
/// with the conjugate gradient method.
///
/// @param newSearchDirectionMethod: Search direction method.
///
/// @see getFletcherReevesSearchDirection(Vector<double>, Vector<double>, Vector<double>).
/// @see getPolakRibiereSearchDirection(Vector<double>, Vector<double>, Vector<double>).

void ConjugateGradient::setSearchDirectionMethod
(ConjugateGradient::SearchDirectionMethod newSearchDirectionMethod)
{
   searchDirectionMethod = newSearchDirectionMethod;
}


// void setOptimalStepSizeMethod(OptimalStepSizeMethod) method

/// This method sets a new optimal step size method to be used for optimization
/// with the conjugate gradient method.
///
/// @param newOptimalStepSizeMethod: Optimal step size method.
///
/// @see getGoldenSectionOptimalStepSize(double, double, Vector<double>, Vector<double>).
/// @see getBrentMethodOptimalStepSize(double, double, Vector<double>, Vector<double>).

void ConjugateGradient::setOptimalStepSizeMethod
(ConjugateGradient::OptimalStepSizeMethod newOptimalStepSizeMethod)
{
   optimalStepSizeMethod = newOptimalStepSizeMethod;
}


// void setInitialArgument(Vector<double>) method

/// This method sets a new initial objective function argument to be used by
/// the conjugate gradient method for optimization.
///
/// @param newInitialArgument: Initial argument Vector.

void ConjugateGradient::setInitialArgument(Vector<double> newInitialArgument)
{
   int size = newInitialArgument.getSize();

   int numberOfVariables = objectiveFunction->getNumberOfVariables();

   if(size != numberOfVariables)
   {
      std::cout << std::endl
                << "Error: ConjugateGradient class. "
                << "void setInitialArgument(Vector<double>) method." << std::endl
                << "Size of initial argument must be equal to number of variables."
                << std::endl << std::endl;

      exit(1);
   }

   initialArgument = newInitialArgument;
}


// void setGradientNormGoal(double) method

/// This method sets a new goal value for the norm of the
/// objective function gradient.
/// This is used as a stopping criterion when optimizing an objective function.
///
/// @param newGradientNormGoal:
/// Goal value for the norm of the objective function gradient.

void ConjugateGradient::setGradientNormGoal(double newGradientNormGoal)
{
   if(newGradientNormGoal < 0.0)
   {
      std::cout << std::endl
                << "Error: ConjugateGradient class." << std::endl
                << "void setGradientNormGoal(double) method."
                << std::endl
                << "Gradient norm goal must be equal or greater than 0."
                << std::endl << std::endl;
      exit(1);
   }

   // Set gradient norm goal

   gradientNormGoal = newGradientNormGoal;
}


// void setMaximumNumberOfIterations(int) method

/// This method sets a new maximum number of conjugate gradient iterations for
/// optimization.
///
/// @param newMaximumNumberOfIterations: Maximum number of iterations.

void ConjugateGradient
::setMaximumNumberOfIterations(int newMaximumNumberOfIterations)
{
   if(newMaximumNumberOfIterations <= 0)
   {
      std::cout << std::endl
                << "Error: ConjugateGradient class." << std::endl
                << "void setMaximumNumberOfIterations(int) method."
                << std::endl
                << "Maximum number of iterations must be greater than 0."
                << std::endl
                << std::endl;

      exit(1);
   }

   maximumNumberOfIterations = newMaximumNumberOfIterations;
}


// void setShowPeriod(int) method

/// This method sets a new number of iterations between two prints of progress
/// information to the screen during optimization.
///
/// @param newShowPeriod: Show period.

void ConjugateGradient::setShowPeriod(int newShowPeriod)
{
   if(newShowPeriod <= 0)
   {
      std::cout << std::endl
                << "Error: ConjugateGradient class." << std::endl
                << "void setShowPeriod(int) method."
                << std::endl
                << "Show period must be greater than 0."
                << std::endl << std::endl;

      exit(1);
   }
   else
   {
      showPeriod = newShowPeriod;
   }
}


// void setFirstStepSize(double) method

/// This method sets a new initial step size for line search
/// in the first iteration of the conjugate gradient.
///
/// @param newFirstStepSize: First step size.
///
/// @see getGoldenSectionOptimalStepSize(double, double, Vector<double>, Vector<double>).
/// @see getBrentMethodOptimalStepSize(double, double, Vector<double>, Vector<double>).

void ConjugateGradient::setFirstStepSize(double newFirstStepSize)
{
   if(newFirstStepSize <= 0.0)
   {
      std::cout << std::endl
                << "Error: ConjugateGradient class." << std::endl
                << "void setFirstStepSize(double) method."
                << std::endl
                << "First step size must be greater than 0." << std::endl
                << std::endl;

      exit(1);
   }

   // Set first step size

   firstStepSize = newFirstStepSize;
}


// void setOptimalStepSizeTolerance(double) method

/// This method sets a new tolerance value to be used in line search.
///
/// @param newOptimalStepSizeTolerance: Tolerance value.
///
/// @see getGoldenSectionOptimalStepSize(double, double, Vector<double>, Vector<double>).
/// @see getBrentMethodOptimalStepSize(double, double, Vector<double>, Vector<double>).

void ConjugateGradient
::setOptimalStepSizeTolerance(double newOptimalStepSizeTolerance)
{
   if(newOptimalStepSizeTolerance <= 0.0)
   {
      std::cout << std::endl
                << "Error: ConjugateGradient class. "
                << "void setOptimalStepSizeTolerance(double) method."
                << std::endl
                << "Tolerance must be greater than 0." << std::endl
                << std::endl;

      exit(1);
   }
   else
   {
      optimalStepSizeTolerance = newOptimalStepSizeTolerance;
   }
}


// void setWarningStepSize(double) method

/// This method sets a new step size value at which a warning message is written
/// to the screen during line search.
///
/// @param newWarningStepSize: Warning step size value.
///
/// @see getGoldenSectionOptimalStepSize(double, double, Vector<double>, Vector<double>).
/// @see getBrentMethodOptimalStepSize(double, double, Vector<double>, Vector<double>).

void ConjugateGradient::setWarningStepSize(double newWarningStepSize)
{
   if(newWarningStepSize <= 0.0)
   {
      std::cout << std::endl
                << "Error: ConjugateGradient class. " << std::endl
                << "void setWarningStepSize(double) method." << std::endl
                << "Warning step size must be greater than 0." << std::endl
                << std::endl;

      exit(1);
   }

   warningStepSize = newWarningStepSize;
}


// double getFletcherReevesParameter(Vector<double>, Vector<double>) method

/// This method returns the Fletcher-Reeves parameter used to get the search
/// direction.
///
/// @param oldGradient: Previous objective function gradient.
/// @param gradient: Current objective function gradient.
///
/// @see getFletcherReevesSearchDirection(Vector<double>, Vector<double>, Vector<double>).

double ConjugateGradient
::getFletcherReevesParameter(Vector<double> oldGradient, Vector<double> gradient)
{
   double FletcherReevesParameter = 0.0;

   int n = gradient.getSize();

   double numerator = 0.0;
   double denominator = 0.0;

   for (int i = 0; i < n; i++)
   {
      numerator += pow(gradient[i], 2);

      denominator += pow(oldGradient[i], 2);
   }

   // Prevent a possible division by 0

   if (denominator == 0.0)
   {
      FletcherReevesParameter = 0.0;
   }
   else
   {
      FletcherReevesParameter = numerator/denominator;
   }

   // Bound the Fletcher-Reeves parameter between 0 and 1

   if (FletcherReevesParameter < 0.0)
   {
      FletcherReevesParameter = 0.0;
   }
   else if (FletcherReevesParameter > 1.0)
   {
      FletcherReevesParameter = 1.0;
   }

   return(FletcherReevesParameter);
}
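
// Note: the quantity computed above is the standard Fletcher-Reeves parameter,
//
//    betaFR = (g . g) / (gOld . gOld),
//
// where g and gOld are the current and previous gradients and "." denotes the
// dot product. Clipping the value to [0, 1] is a safeguard of this particular
// implementation rather than part of the classical formula.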


// double getPolakRibiereParameter(Vector<double>, Vector<double>) method

/// This method returns the Polak-Ribiere parameter used to get the search
/// direction.
///
/// @param oldGradient: Previous objective function gradient.
/// @param gradient: Current objective function gradient.
///
/// @see getPolakRibiereSearchDirection(Vector<double>, Vector<double>, Vector<double>).

double ConjugateGradient
::getPolakRibiereParameter(Vector<double> oldGradient, Vector<double> gradient)
{
   int n = gradient.getSize();

   double PolakRibiereParameter = 0.0;

   double numerator = 0.0;
   double denominator = 0.0;

   for (int i = 0; i < n; i++)
   {
      numerator += (gradient[i] - oldGradient[i])*gradient[i];
      denominator += pow(oldGradient[i], 2);
   }

   // Prevent a possible division by 0

   if (denominator == 0.0)
   {
      PolakRibiereParameter = 0.0;
   }
   else
   {
      PolakRibiereParameter = numerator/denominator;
   }

   // Bound the Polak-Ribiere parameter between 0 and 1

   if (PolakRibiereParameter < 0.0)
   {
      PolakRibiereParameter = 0.0;
   }
   else if (PolakRibiereParameter > 1.0)
   {
      PolakRibiereParameter = 1.0;
   }

   return(PolakRibiereParameter);
}
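
// Note: the quantity computed above is the standard Polak-Ribiere parameter,
//
//    betaPR = ((g - gOld) . g) / (gOld . gOld).
//
// Resetting negative values to zero matches the common "PR+" variant; the
// upper bound of 1 is, as above, a safeguard of this implementation.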


// Vector<double> getFletcherReevesSearchDirection
// (Vector<double>, Vector<double>, Vector<double>) method

/// This method returns the search direction using the Fletcher-Reeves
/// update.
///
/// @param oldGradient: Previous objective function gradient.
/// @param gradient: Current objective function gradient.
/// @param oldSearchDirection: Previous search direction Vector.
///
/// @see getFletcherReevesParameter(Vector<double>, Vector<double>).
/// @see getPolakRibiereParameter(Vector<double>, Vector<double>).
/// @see getPolakRibiereSearchDirection(Vector<double>, Vector<double>, Vector<double>).

Vector<double> ConjugateGradient::getFletcherReevesSearchDirection
(Vector<double> oldGradient, Vector<double> gradient, Vector<double> oldSearchDirection)
{
   int numberOfVariables = objectiveFunction->getNumberOfVariables();

   Vector<double> FletcherReevesSearchDirection(numberOfVariables, 0.0);

   double FletcherReevesParameter = getFletcherReevesParameter(oldGradient, gradient);

   for (int i = 0; i < numberOfVariables; i++)
   {
      FletcherReevesSearchDirection[i]
      = -1.0*gradient[i] + FletcherReevesParameter*oldSearchDirection[i];
   }

   return(FletcherReevesSearchDirection);
}


// Vector<double> getPolakRibiereSearchDirection
// (Vector<double>, Vector<double>, Vector<double>) method

/// This method returns the search direction using the Polak-Ribiere
/// update.
///
/// @param oldGradient: Previous objective function gradient.
/// @param gradient: Current objective function gradient.
/// @param oldSearchDirection: Previous search direction Vector.
///
/// @see getPolakRibiereParameter(Vector<double>, Vector<double>).
/// @see getFletcherReevesParameter(Vector<double>, Vector<double>).
/// @see getFletcherReevesSearchDirection(Vector<double>, Vector<double>, Vector<double>).

Vector<double> ConjugateGradient::getPolakRibiereSearchDirection
(Vector<double> oldGradient, Vector<double> gradient, Vector<double> oldSearchDirection)
{
   int numberOfVariables = objectiveFunction->getNumberOfVariables();

   Vector<double> PolakRibiereSearchDirection(numberOfVariables, 0.0);

   double PolakRibiereParameter = getPolakRibiereParameter(oldGradient, gradient);

   for (int i = 0; i < numberOfVariables; i++)
   {
      PolakRibiereSearchDirection[i]
      = -1.0*gradient[i] + PolakRibiereParameter*oldSearchDirection[i];
   }

   return(PolakRibiereSearchDirection);
}
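
// Both search direction methods above implement the conjugate gradient
// recurrence
//
//    d = -g + beta*dOld,
//
// with beta given by the Fletcher-Reeves or the Polak-Ribiere parameter.
// When beta is zero the recurrence reduces to plain gradient descent, which
// is also what the first iteration uses, since the previous gradient and
// search direction are initialized to zero vectors.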


// double getGoldenSectionOptimalStepSize
// (double, double, Vector<double>, Vector<double>) method

/// This method returns the optimal step size by searching in a given
/// direction to locate the minimum of the objective function in that
/// direction. It uses the golden section method.
///
/// @param initialStepSize: Initial step size in line search.
/// @param evaluation: Objective function evaluation value.
/// @param argument: Objective function argument Vector.
/// @param searchDirection: Search direction Vector.
///
/// @see getBrentMethodOptimalStepSize(double, double, Vector<double>, Vector<double>).

double ConjugateGradient::getGoldenSectionOptimalStepSize
(double initialStepSize, double evaluation,
Vector<double> argument, Vector<double> searchDirection)
{
   double optimalStepSize = 0.0;

   int numberOfVariables = objectiveFunction->getNumberOfVariables();

   Vector<double> potentialArgument(numberOfVariables, 0.0);

   double a = 0.0;
   double evaluationA = 0.0;
   double b = 0.0;
   double evaluationB = 0.0;

   double c = 0.0;
   double evaluationC = 0.0;
   double d = 0.0;
   double evaluationD = 0.0;

   double tau = (3.0-sqrt(5.0))/2.0; // 0.382

   // Start golden section search

   // Set initial a point

   a = 0.0;

   // Calculate evaluation for a

   evaluationA = evaluation;

   // Set initial b point

   b = initialStepSize;

   // Calculate evaluation for b

   for (int i = 0; i < numberOfVariables; i++)
   {
      potentialArgument[i] = argument[i] + searchDirection[i]*b;
   }

   evaluationB = objectiveFunction->getEvaluation(potentialArgument);

   // Find initial interval where minimum evaluation occurs

   while(evaluationA > evaluationB)
   {
      // Set new b

      b = 2.0*b;

      if(b >= warningStepSize)
      {
         std::cout << std::endl
                   << "Warning: Step size is " << b
                   << std::endl;
      }

      // Calculate evaluation for new b

      for (int i = 0; i < numberOfVariables; i++)
      {
         potentialArgument[i] = argument[i] + searchDirection[i]*b;
      }

      evaluationB = objectiveFunction->getEvaluation(potentialArgument);
   }

   // Initialize c and d (interior points for line minimization)

   // Initialize c point

   c = a + tau*(b-a);

   // Calculate evaluation for c

   for (int i = 0; i < numberOfVariables; i++)
   {
      potentialArgument[i] = argument[i] + searchDirection[i]*c;
   }

   evaluationC = objectiveFunction->getEvaluation(potentialArgument);

   // Initialize d point

   d = b - tau*(b-a);

   // Calculate evaluation for d

   for (int i = 0; i < numberOfVariables; i++)
   {
      potentialArgument[i] = argument[i] + searchDirection[i]*d;
   }

   evaluationD = objectiveFunction->getEvaluation(potentialArgument);

   // Reduce the interval with the golden section algorithm

   while(b-a > optimalStepSizeTolerance)
   {
      Vector<double> evaluationVectorLeft(3, 0.0);
      evaluationVectorLeft[0] = evaluationA;
      evaluationVectorLeft[1] = evaluationC;
      evaluationVectorLeft[2] = evaluationD;

      double minimumEvaluationLeft = getMinimum(evaluationVectorLeft);

      Vector<double> evaluationVectorRight(3, 0.0);
      evaluationVectorRight[0] = evaluationB;
      evaluationVectorRight[1] = evaluationC;
      evaluationVectorRight[2] = evaluationD;

      double minimumEvaluationRight = getMinimum(evaluationVectorRight);

      if((evaluationC <= evaluationD && evaluationB >= minimumEvaluationLeft)
      || (evaluationA <= minimumEvaluationRight))
      // There is a minimum between a and d
      {
         b = d;
         d = c;

         evaluationB = evaluationD;
         evaluationD = evaluationC;

         // Set new c point

         c = a + tau*(b-a);

         // Calculate evaluation for new c

         for (int i = 0; i < numberOfVariables; i++)
         {
            potentialArgument[i] = argument[i] + searchDirection[i]*c;
         }

         evaluationC = objectiveFunction->getEvaluation(potentialArgument);
      }
      else if((evaluationD <= evaluationC && evaluationA >= minimumEvaluationRight)
      || (evaluationB <= minimumEvaluationLeft))
      // There is a minimum between c and b
      {
         a = c;
         c = d;

         evaluationA = evaluationC;
         evaluationC = evaluationD;

         // Set new d point

         d = b - tau*(b-a);

         // Calculate evaluation for new d

         for (int i = 0; i < numberOfVariables; i++)
         {
            potentialArgument[i] = argument[i] + searchDirection[i]*d;
         }

         evaluationD = objectiveFunction->getEvaluation(potentialArgument);
      }
      else
      {
         std::cout << std::endl
                   << "Error: ConjugateGradient class." << std::endl
                   << "double getGoldenSectionOptimalStepSize "
                   << "(double, double, Vector<double>, Vector<double>) method."
                   << std::endl
                   << "Unable to find where the minimum is." << std::endl;

         exit(1);
      }
   }

   // Get minimum evaluation and optimal step size among points a, b, c and d

   double minimumEvaluation = evaluation;

   if(evaluationA < minimumEvaluation)
   {
      minimumEvaluation = evaluationA;
      optimalStepSize = a;
   }

   if(evaluationB < minimumEvaluation)
   {
      minimumEvaluation = evaluationB;
      optimalStepSize = b;
   }

   if(evaluationC < minimumEvaluation)
   {
      minimumEvaluation = evaluationC;
      optimalStepSize = c;
   }

   if(evaluationD < minimumEvaluation)
   {
      minimumEvaluation = evaluationD;
      optimalStepSize = d;
   }

   return(optimalStepSize);
}
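
// Note: tau = (3 - sqrt(5))/2 ~ 0.382 places the interior points c and d so
// that each pass of the reduction loop above keeps a fraction 1 - tau ~ 0.618
// of the bracket [a,b]. The number of passes needed to reach the tolerance is
// therefore roughly log(optimalStepSizeTolerance/(b - a))/log(0.618).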


// double getBrentMethodOptimalStepSize
// (double, double, Vector<double>, Vector<double>) method

/// This method returns the optimal step size by searching in a given
/// direction to locate the minimum of the objective function in that
/// direction. It uses Brent's method.
///
/// @param initialStepSize: Initial step size in line search.
/// @param evaluation: Objective function evaluation value.
/// @param argument: Objective function argument Vector.
/// @param searchDirection: Search direction Vector.
///
/// @see getGoldenSectionOptimalStepSize(double, double, Vector<double>, Vector<double>).

double ConjugateGradient::getBrentMethodOptimalStepSize
(double initialStepSize, double evaluation,
Vector<double> argument, Vector<double> searchDirection)
{
   double optimalStepSize = 0.0;

   int numberOfVariables = objectiveFunction->getNumberOfVariables();

   Vector<double> potentialArgument(numberOfVariables, 0.0);

   double a = 0.0;
   double evaluationA = 0.0;
   double b = 0.0;
   double evaluationB = 0.0;

   double u = 0.0;
   double evaluationU = 0.0;
   double v = 0.0;
   double evaluationV = 0.0;
   double w = 0.0;
   double evaluationW = 0.0;
   double x = 0.0;
   double evaluationX = 0.0;

   double tau = (3.0-sqrt(5.0))/2.0; // 0.382

   // Set initial a point

   a = 0.0;

   // Calculate evaluation for a

   evaluationA = evaluation;

   // Set initial b point

   b = initialStepSize;

   // Calculate evaluation for b

   for (int i = 0; i < numberOfVariables; i++)
   {
      potentialArgument[i] = argument[i] + searchDirection[i]*b;
   }

   evaluationB = objectiveFunction->getEvaluation(potentialArgument);

   // Find initial interval where minimum evaluation occurs

   while(evaluationA > evaluationB)
   {
      // Set new b

      b = 2.0*b;

      if(b >= warningStepSize)
      {
         std::cout << std::endl
                   << "Warning: Step size is " << b
                   << std::endl;
      }

      // Calculate evaluation for new b

      for (int i = 0; i < numberOfVariables; i++)
      {
         potentialArgument[i] = argument[i] + searchDirection[i]*b;
      }

      evaluationB = objectiveFunction->getEvaluation(potentialArgument);
   }

   // Get intermediate point v

   v = a + tau*(b-a);

   // Calculate evaluation for v

   for (int i = 0; i < numberOfVariables; i++)
   {
      potentialArgument[i] = argument[i] + searchDirection[i]*v;
   }

   evaluationV = objectiveFunction->getEvaluation(potentialArgument);

   // Set initial w and x points

   w = v;
   evaluationW = evaluationV;

   x = v;
   evaluationX = evaluationV;

   bool goldenSection = false;

   // Reduce the interval

   while(b-a > optimalStepSizeTolerance)
   {
      // Quadratic interpolation

      if(w != x && w != v && x != v) // Can construct parabola
      {
         // Step size vector, sorted in ascending order

         Vector<double> stepSizeVector(3, 0.0);
         stepSizeVector[0] = v;
         stepSizeVector[1] = w;
         stepSizeVector[2] = x;

         std::sort(stepSizeVector.begin(), stepSizeVector.end(), std::less<double>());

         // Evaluation vector, matching the sorted step sizes

         Vector<double> evaluationVector(3, 0.0);

         for(int i = 0; i < 3; i++)
         {
            if(stepSizeVector[i] == v)
            {
               evaluationVector[i] = evaluationV;
            }
            else if(stepSizeVector[i] == w)
            {
               evaluationVector[i] = evaluationW;
            }
            else if(stepSizeVector[i] == x)
            {
               evaluationVector[i] = evaluationX;
            }
            else
            {
               std::cout << std::endl
                         << "Error: ConjugateGradient class." << std::endl
                         << "double getBrentMethodOptimalStepSize" << std::endl
                         << "(double, double, Vector<double>, Vector<double>) method."
                         << std::endl
                         << "Unable to construct step size and evaluation vectors right."
                         << std::endl << std::endl;

               exit(1);
            }
         }

         // xStar is the minimum of the parabola through the three step size points

         double numerator
         = (pow(stepSizeVector[2],2) - pow(stepSizeVector[1],2))*evaluationVector[0]
         + (pow(stepSizeVector[1],2) - pow(stepSizeVector[0],2))*evaluationVector[2]
         + (pow(stepSizeVector[0],2) - pow(stepSizeVector[2],2))*evaluationVector[1];

         double denominator
         = (stepSizeVector[2] - stepSizeVector[1])*evaluationVector[0]
         + (stepSizeVector[1] - stepSizeVector[0])*evaluationVector[2]
         + (stepSizeVector[0] - stepSizeVector[2])*evaluationVector[1];

         double xStar = 0.5*numerator/denominator;

         if(xStar < b && a < xStar) // xStar is in [a,b]
         {
            u = xStar;

            // Good, no need to perform golden section

            goldenSection = false;
         }
         else // xStar is not in [a,b]
         {
            // Bad, need to perform golden section

            goldenSection = true;
         }
      }
      else // Cannot construct parabola
      {
         // Bad, need to perform golden section

         goldenSection = true;
      }

      // Golden section

      if(goldenSection == true)
      {
         if(x > (a+b)/2.0)
         {
            u = x - tau*(x-a);
         }
         else
         {
            u = x + tau*(b-x);
         }
      }

      // Calculate evaluation for u

      for (int i = 0; i < numberOfVariables; i++)
      {
         potentialArgument[i] = argument[i] + searchDirection[i]*u;
      }

      evaluationU = objectiveFunction->getEvaluation(potentialArgument);

      // Update points

      if(evaluationU < evaluationX)
      {
         if(u < x)
         {
            b = x;
            evaluationB = evaluationX;
         }
         else
         {
            a = x;
            evaluationA = evaluationX;
         }

         v = w;
         evaluationV = evaluationW;

         w = x;
         evaluationW = evaluationX;

         x = u;
         evaluationX = evaluationU;
      }
      else
      {
         if(u < x)
         {
            a = u;
            evaluationA = evaluationU;
         }
         else
         {
            b = u;
            evaluationB = evaluationU;
         }

         if((evaluationU < evaluationW) || (w == x))
         {
            v = w;
            evaluationV = evaluationW;

            w = u;
            evaluationW = evaluationU;
         }
         else if((evaluationU < evaluationV) || (v == x) || (v == w))
         {
            v = u;
            evaluationV = evaluationU;
         }
      }
   } // while loop

   // Get minimum evaluation and optimal step size among points a, b, v, w and x

   double minimum = evaluation;

   if(evaluationA < minimum)
   {
      minimum = evaluationA;
      optimalStepSize = a;
   }

   if(evaluationB < minimum)
   {
      minimum = evaluationB;
      optimalStepSize = b;
   }

   if(evaluationV < minimum)
   {
      minimum = evaluationV;
      optimalStepSize = v;
   }

   if(evaluationW < minimum)
   {
      minimum = evaluationW;
      optimalStepSize = w;
   }

   if(evaluationX < minimum)
   {
      minimum = evaluationX;
      optimalStepSize = x;
   }

   return(optimalStepSize);
}
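
// Note: the parabolic step in the method above is the minimum of the parabola
// interpolating the three points (x0,f0), (x1,f1), (x2,f2):
//
//    xStar = 0.5*[(x2^2 - x1^2)*f0 + (x1^2 - x0^2)*f2 + (x0^2 - x2^2)*f1]
//               /[(x2 - x1)*f0 + (x1 - x0)*f2 + (x0 - x2)*f1],
//
// which is exactly the numerator/denominator computation carried out with the
// sorted step sizes and their evaluations. When xStar falls outside [a,b] the
// method falls back to a golden section step.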


// Vector<double> getMinimalArgument(void) method

/// This method optimizes an objective function according to the
/// conjugate gradient algorithm.
/// It returns the minimal argument of the objective function.
/// Optimization occurs according to the optimization operators and the
/// optimization parameters.

Vector<double> ConjugateGradient::getMinimalArgument(void)
{
   int numberOfVariables = objectiveFunction->getNumberOfVariables();

   Vector<double> minimalArgument(numberOfVariables, 0.0);
   Vector<double> argument(numberOfVariables, 0.0);

   // Evaluation history vector

   Vector<double> newEvaluationHistory(maximumNumberOfIterations+1, 0.0);
   evaluationHistory = newEvaluationHistory;

   // Objective function gradient norm optimization history vector

   Vector<double> newGradientNormHistory(maximumNumberOfIterations+1, 0.0);
   gradientNormHistory = newGradientNormHistory;

   double evaluation = 0.0;

   Vector<double> gradient(numberOfVariables, 0.0);
   Vector<double> oldGradient(numberOfVariables, 0.0);

   double gradientNorm = 0.0;

   Vector<double> searchDirection(numberOfVariables, 0.0);
   Vector<double> oldSearchDirection(numberOfVariables, 0.0);

   double slope = 0.0;

   double initialStepSize = 0.0;
   double optimalStepSize = 0.0;
   double oldOptimalStepSize = 0.0;

   time_t beginningTime, currentTime;
   double elapsedTime = 0.0;

   // Set beginning optimization time

   time(&beginningTime);

   std::cout << std::endl
             << "Getting minimal argument with conjugate gradient..."
             << std::endl;

   argument = initialArgument;

   // Initial objective function evaluation

   evaluation = objectiveFunction->getEvaluation(argument);

   evaluationHistory[0] = evaluation;

   if(evaluation <= evaluationGoal)
   {
      std::cout << std::endl
                << "Initial evaluation is less than goal." << std::endl
                << "Initial evaluation: " << evaluation << std::endl;

      objectiveFunction->print(argument);

      minimalArgument = argument;

      // Print minimal argument to screen

      std::cout << std::endl
                << "Minimal argument:" << std::endl;

      for(int i = 0; i < numberOfVariables; i++)
      {
         std::cout << minimalArgument[i] << " ";
      }

      return(minimalArgument);
   }
   else
   {
      std::cout << "Initial evaluation: " << evaluation << std::endl;
   }

   // Initial objective function gradient

   gradient = objectiveFunction->getGradient(argument);

   gradientNorm = objectiveFunction->getGradientNorm(gradient);

   gradientNormHistory[0] = gradientNorm;

   if(gradientNorm <= gradientNormGoal)
   {
      std::cout << std::endl
                << "Initial gradient norm is less than goal." << std::endl
                << "Initial gradient norm: " << gradientNorm << std::endl;

      objectiveFunction->print(argument);

      minimalArgument = argument;

      // Print minimal argument to screen

      std::cout << std::endl
                << "Minimal argument:" << std::endl;

      for(int i = 0; i < numberOfVariables; i++)
      {
         std::cout << minimalArgument[i] << " ";
      }

      return(minimalArgument);
   }
   else
   {
      std::cout << "Initial gradient norm: " << gradientNorm << std::endl;
   }

   objectiveFunction->print(argument);

   // Loop over iterations

   for(int iteration = 1; iteration <= maximumNumberOfIterations; iteration++)
   {
      // Get search direction

      if(iteration % numberOfVariables == 0)
      {
         // Restart: search direction is set to negative gradient

         for(int i = 0; i < numberOfVariables; i++)
         {
            searchDirection[i] = -1.0*gradient[i];
         }
      }
      else
      {
         // Conjugate gradient search direction

         switch(searchDirectionMethod)
         {
            case FletcherReeves:

               searchDirection = getFletcherReevesSearchDirection
               (oldGradient, gradient, oldSearchDirection);

               break;

            case PolakRibiere:

               searchDirection = getPolakRibiereSearchDirection
               (oldGradient, gradient, oldSearchDirection);

               break;
         }
      }

      // Calculate slope

      slope = 0.0;

      for(int i = 0; i < numberOfVariables; i++)
      {
         slope += gradient[i]*searchDirection[i];
      }

      // Check for a descent direction

      if(slope >= 0.0)
      {
         std::cout << std::endl
                   << "Warning: Function slope is equal or greater than zero."
                   << std::endl
                   << "Search direction reset to negative gradient."
                   << std::endl;

         // Reset search direction

         for(int i = 0; i < numberOfVariables; i++)
         {
            searchDirection[i] = -1.0*gradient[i];
         }

         // Calculate new slope

         slope = 0.0;

         for(int i = 0; i < numberOfVariables; i++)
         {
            slope += gradient[i]*searchDirection[i];
         }
      }

      // Get optimal step size

      if(iteration == 1)
      {
         initialStepSize = firstStepSize;
      }
      else
      {
         initialStepSize = oldOptimalStepSize;
      }

      switch(optimalStepSizeMethod)
      {
         case GoldenSection:

            optimalStepSize = getGoldenSectionOptimalStepSize
            (initialStepSize, evaluation, argument, searchDirection);

            break;

         case BrentMethod:

            optimalStepSize = getBrentMethodOptimalStepSize
            (initialStepSize, evaluation, argument, searchDirection);

            break;
      }

      // Optimal step size stopping criterion

      if(optimalStepSize == 0.0)
      {
         std::cout << std::endl
                   << "Iteration " << iteration << ": "
                   << "Optimal step size is zero." << std::endl;

         std::cout << "Final evaluation: " << evaluation << ";" << std::endl;
         std::cout << "Final gradient norm: " << gradientNorm << ";" << std::endl;

         objectiveFunction->print(argument);

         break;
      }

      // Get new free parameters

      for (int i = 0; i < numberOfVariables; i++)
      {
         argument[i] += optimalStepSize*searchDirection[i];
      }

      // Objective function evaluation

      evaluation = objectiveFunction->getEvaluation(argument);

      evaluationHistory[iteration] = evaluation;

      // Objective function gradient

      gradient = objectiveFunction->getGradient(argument);

      gradientNorm = objectiveFunction->getGradientNorm(gradient);

      gradientNormHistory[iteration] = gradientNorm;

      // Stopping criteria

      // Evaluation goal

      if (evaluation <= evaluationGoal)
      {
         std::cout << std::endl
                   << "Iteration " << iteration << ": "
                   << "Evaluation goal reached." << std::endl;

         std::cout << "Evaluation: " << evaluation << ";" << std::endl;
         std::cout << "Gradient norm: " << gradientNorm << ";" << std::endl;

         objectiveFunction->print(argument);

         break;
      }

      // Norm of objective function gradient goal

      if (gradientNorm <= gradientNormGoal)
      {
         std::cout << std::endl
                   << "Iteration " << iteration << ": "
                   << "Gradient norm goal reached."
                   << std::endl;

         std::cout << "Evaluation: " << evaluation << ";" << std::endl;
         std::cout << "Gradient norm: " << gradientNorm << ";" << std::endl;

         objectiveFunction->print(argument);

         break;
      }

      // Maximum optimization time

      time(&currentTime);

      elapsedTime = difftime(currentTime, beginningTime);

      if (elapsedTime >= maximumTime)
      {
         std::cout << std::endl
                   << "Iteration " << iteration << ": "
                   << "Maximum optimization time reached."
                   << std::endl;

         std::cout << "Evaluation: " << evaluation << ";" << std::endl;
         std::cout << "Gradient norm: " << gradientNorm << ";" << std::endl;

         objectiveFunction->print(argument);

         break;
      }

      // Maximum number of iterations

      if (iteration == maximumNumberOfIterations)
      {
         std::cout << std::endl
                   << "Iteration " << iteration << ": "
                   << "Maximum number of iterations reached."
                   << std::endl;

         std::cout << "Evaluation: " << evaluation << ";" << std::endl;
         std::cout << "Gradient norm: " << gradientNorm << ";" << std::endl;

         objectiveFunction->print(argument);

         break;
      }

      // Progress

      if(iteration % showPeriod == 0)
      {
         std::cout << std::endl
                   << "Iteration " << iteration << "; " << std::endl;

         std::cout << "Evaluation: " << evaluation << ";" << std::endl;
         std::cout << "Gradient norm: " << gradientNorm << ";" << std::endl;

         objectiveFunction->print(argument);
      }

      // Update

      oldGradient = gradient;
      oldSearchDirection = searchDirection;
      oldOptimalStepSize = optimalStepSize;
   }

   // Set minimal argument

   minimalArgument = argument;

   // Print minimal argument to screen

   std::cout << std::endl
             << "Minimal argument:" << std::endl;

   for(int i = 0; i < numberOfVariables; i++)
   {
      std::cout << minimalArgument[i] << " ";
   }

   std::cout << std::endl;

   return(minimalArgument);
}
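
// A minimal sketch of selecting the optimization operators before running the
// algorithm (illustrative, not part of the original file; "myFunction" is the
// hypothetical objective function from the constructor example above):
//
//    ConjugateGradient conjugateGradient(&myFunction);
//
//    conjugateGradient.setSearchDirectionMethod(ConjugateGradient::FletcherReeves);
//    conjugateGradient.setOptimalStepSizeMethod(ConjugateGradient::GoldenSection);
//
//    Vector<double> minimalArgument = conjugateGradient.getMinimalArgument();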


// void print(void) method

/// This method prints to the screen the optimization operators and
/// the optimization parameters concerning the conjugate gradient object:
///
/// Initial argument.
///
/// Optimization operators:
/// <ul>
/// <li> Search direction method.
/// <li> Optimal step size method.
/// </ul>
///
/// Optimization parameters:
/// <ul>
/// <li> First step size.
/// <li> Optimal step size tolerance.
/// </ul>
///
/// Stopping criteria:
/// <ul>
/// <li> Evaluation goal.
/// <li> Gradient norm goal.
/// <li> Maximum time.
/// <li> Maximum number of iterations.
/// </ul>
///
/// User stuff:
/// <ul>
/// <li> Warning step size.
/// <li> Show period.
/// </ul>

void ConjugateGradient::print(void)
{
   std::cout << std::endl
             << "Conjugate Gradient Object." << std::endl;

   int numberOfVariables = objectiveFunction->getNumberOfVariables();

   // Initial argument

   std::cout << std::endl
             << "Initial argument:" << std::endl;

   for(int i = 0; i < numberOfVariables; i++)
   {
      std::cout << initialArgument[i] << " ";
   }

   std::cout << std::endl;

   // Optimization operators

   std::cout << std::endl
             << "Optimization operators:" << std::endl;

   // Search direction method

   std::cout << "Search direction method:" << std::endl;

   switch(searchDirectionMethod)
   {
      case FletcherReeves:

         std::cout << "Fletcher-Reeves" << std::endl;

      break;

      case PolakRibiere:

         std::cout << "Polak-Ribiere" << std::endl;

      break;
   }

   // Optimal step size method

   std::cout << "Optimal step size method:" << std::endl;

   switch(optimalStepSizeMethod)
   {
      case GoldenSection:

         std::cout << "Golden section" << std::endl;

      break;

      case BrentMethod:

         std::cout << "Brent method" << std::endl;

      break;
   }

   // Optimization parameters

   std::cout << std::endl
             << "Optimization parameters: " << std::endl
             << "First step size: " << std::endl
             << firstStepSize << std::endl
             << "Optimal step size tolerance: " << std::endl
             << optimalStepSizeTolerance << std::endl;

   // Stopping criteria

   std::cout << std::endl
             << "Stopping criteria: " << std::endl
             << "Evaluation goal: " << std::endl
             << evaluationGoal << std::endl
             << "Gradient norm goal: " << std::endl
             << gradientNormGoal << std::endl
             << "Maximum time: " << std::endl
             << maximumTime << std::endl
             << "Maximum number of iterations: " << std::endl
             << maximumNumberOfIterations << std::endl;

   // User stuff

   std::cout << std::endl
             << "User stuff: " << std::endl
             << "Warning step size: " << std::endl
             << warningStepSize << std::endl
             << "Show period: " << std::endl
             << showPeriod
             << std::endl;
}
01737 
01738 
01739 // void save(char*) method
01740 
01741 /// This method saves the conjugate gradient object to a data file. 
01742 ///
01743 /// Initial argument.
01744 ///
01745 /// Optimization operators:
01746 /// <ul>
01747 /// <li> Search direction method.
01748 /// <li> Optimal step size method.
01749 /// </ul>
01750 ///
01751 /// Optimization parameters:
01752 /// <ul>
01753 /// <li> First step size.
01754 /// <li> Optimal step size tolerance.
01755 /// </ul>
01756 ///
01757 /// Stopping criteria:
01758 /// <ul> 
01759 /// <li> Evaluation goal.
01760 /// <li> Gradient norm goal.
01761 /// <li> Maximum time.
01762 /// <li> Maximum number of iterations. 
01763 /// </ul> 
01764 ///  
01765 /// User stuff: 
01766 /// <ul>
01767 /// <li> Warning step size.
01768 /// <li> Show period. 
01769 /// </ul>
01770 ///
01771 /// @param filename: Filename.
01772 ///
01773 /// @see load(char*).
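///
/// With the class default values, and for a two-variable objective function,
/// the saved data file looks like this (the two numbers on the initial
/// argument line are illustrative, since the default initial argument is
/// random):
///
/// \code
/// % Purple: An Open Source Numerical Optimization C++ Library.
/// % Conjugate Gradient Object.
/// InitialArgument:
/// 0.5 -0.2
/// SearchDirectionMethod:
/// PolakRibiere
/// OptimalStepSizeMethod:
/// BrentMethod
/// FirstStepSize:
/// 0.001
/// OptimalStepSizeTolerance:
/// 0.001
/// EvaluationGoal:
/// -1e+69
/// GradientNormGoal:
/// 0
/// MaximumTime:
/// 1e+06
/// MaximumNumberOfIterations:
/// 1000
/// WarningStepSize:
/// 1000
/// ShowPeriod:
/// 25
/// \endcode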
01774 
01775 void ConjugateGradient::save(char* filename)
01776 {
01777    // File
01778 
01779    std::fstream file;
01780 
01781    file.open(filename, std::ios::out);
01782 
01783    if(!file.is_open())
01784    {
01785       std::cout << std::endl 
01786                 << "Error: ConjugateGradient class. " << std::endl
01787                 << "void save(char*) method." << std::endl
01788                 << "Cannot open conjugate gradient object data file."  << std::endl
01789                 << std::endl;
01790 
01791       exit(1);
01792    }
01793    else
01794    {
01795       std::cout << std::endl
01796                 << "Saving conjugate gradient object to data file..." << std::endl;
01797    }
01798 
01799    // Write file header
01800 
01801    file << "% Purple: An Open Source Numerical Optimization C++ Library." 
01802         << std::endl 
01803         << "% Conjugate Gradient Object." << std::endl; 
01804 
01805    int numberOfVariables = objectiveFunction->getNumberOfVariables();
01806  
01807    // Initial argument
01808 
01809    file << "InitialArgument:" << std::endl;
01810 
01811    for(int i = 0; i < numberOfVariables; i++)
01812    {
01813       file << initialArgument[i] << " ";        
01814    }
01815    
01816    file << std::endl;
01817 
01818    // Optimization operators
01819 
01820    // Search direction method
01821 
01822    file << "SearchDirectionMethod:" << std::endl;
01823 
01825    switch(searchDirectionMethod)
01826    {
01827       case FletcherReeves:
01828 
01829       file << "FletcherReeves" << std::endl;
01830 
01831       break;
01832 
01833       case PolakRibiere:
01834 
01835       file << "PolakRibiere" << std::endl;
01836 
01837       break;
01838    }
01839 
01840 
01841    // Optimal step size method
01842 
01843    file << "OptimalStepSizeMethod:" << std::endl;
01844 
01845    switch(optimalStepSizeMethod)
01846    {
01847       case GoldenSection:
01848 
01849       file << "GoldenSection" << std::endl;
01850 
01851       break;
01852 
01853       case BrentMethod:
01854 
01855       file << "BrentMethod" << std::endl;
01856 
01857       break;
01858    }
01859 
01860 
01861    // Optimization parameters
01862 
01863    file << "FirstStepSize:" << std::endl
01864         << firstStepSize << std::endl
01865         << "OptimalStepSizeTolerance:" << std::endl
01866         << optimalStepSizeTolerance << std::endl;
01867 
01868    // Stopping criteria
01869 
01870    file << "EvaluationGoal:" << std::endl
01871         << evaluationGoal << std::endl
01872         << "GradientNormGoal:" << std::endl
01873         << gradientNormGoal << std::endl
01874         << "MaximumTime:" << std::endl
01875         << maximumTime << std::endl
01876         << "MaximumNumberOfIterations:" << std::endl
01877         << maximumNumberOfIterations << std::endl;
01878 
01879    // User stuff
01880 
01881    file << "WarningStepSize:" << std::endl
01882         << warningStepSize << std::endl
01883         << "ShowPeriod:" << std::endl
01884         << showPeriod << std::endl;
01885 
01886    file.close();
01887 }
01888 
01889 
01890 // void load(char*) method
01891 
01892 /// This method loads a conjugate gradient object from a data file. 
01893 /// Please note the file format, which is specified in the User's Guide. 
01894 ///
01896 /// Initial argument.
01897 ///
01898 /// Optimization operators:
01899 /// <ul>
01900 /// <li> Search direction method.
01901 /// <li> Optimal step size method.
01902 /// </ul>
01903 ///
01904 /// Optimization parameters:
01905 /// <ul>
01906 /// <li> First step size.
01907 /// <li> Optimal step size tolerance.
01908 /// </ul>
01909 ///
01910 /// Stopping criteria:
01911 /// <ul> 
01912 /// <li> Evaluation goal.
01913 /// <li> Gradient norm goal.
01914 /// <li> Maximum time.
01915 /// <li> Maximum number of iterations. 
01916 /// </ul> 
01917 ///  
01918 /// User stuff: 
01919 /// <ul>
01920 /// <li> Warning step size.
01921 /// <li> Show period. 
01922 /// </ul>
01923 ///
01924 /// @param filename: Filename.
01925 ///
01926 /// @see save(char*).
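///
/// A minimal usage sketch, where "MyFunction" stands for an ObjectiveFunction
/// subclass defined elsewhere and the file name is illustrative:
///
/// \code
/// MyFunction myFunction;
///
/// ConjugateGradient conjugateGradient(&myFunction);
///
/// conjugateGradient.load("ConjugateGradient.dat");
/// \endcode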
01927 
01928 void ConjugateGradient::load(char* filename)
01929 {
01930    // File
01931 
01932    std::fstream file;
01933 
01934    file.open(filename, std::ios::in);
01935 
01936    if(!file.is_open())
01937    {
01938       std::cout << std::endl
01939                 << "Error: ConjugateGradient class." << std::endl
01940                 << "void load(char*) method." << std::endl
01941                 << "Cannot open conjugate gradient object data file."  << std::endl;
01942 
01943       exit(1);
01944    }
01945    else
01946    {
01947       std::cout << std::endl
01948                 << "Loading conjugate gradient object from data file..."  << std::endl;
01949    }
01950 
01951    std::string word;
01952 
01953    // Initial argument
01954 
01955    while(word != "InitialArgument:")
01956    {
01957       if(!(file >> word)) break;   // malformed file: stop at end of stream instead of looping forever
01958    }
01959 
01960    int numberOfVariables = objectiveFunction->getNumberOfVariables();
01961 
01962    for(int i = 0; i < numberOfVariables; i++)
01963    {
01964       file >> initialArgument[i];        
01965    }
01966 
01967    // Optimization operators
01968 
01969    // Search direction method
01970 
01971    file >> word;   // discard the "SearchDirectionMethod:" label
01972    file >> word;   // read the method name
01973    if(word == "FletcherReeves")
01974    {
01975       searchDirectionMethod = FletcherReeves;
01976    }
01977    else if(word == "PolakRibiere")
01978    {
01979       searchDirectionMethod = PolakRibiere;
01980    }
01981 
01982    // Optimal step size method
01983 
01984    file >> word;   // discard the "OptimalStepSizeMethod:" label
01985 
01986    file >> word;   // read the method name
01987 
01988    if(word == "GoldenSection")
01989    {
01990       optimalStepSizeMethod = GoldenSection;
01991    }
01992    else if(word == "BrentMethod")
01993    {
01994       optimalStepSizeMethod = BrentMethod;
01995    }
01996 
01997    // Optimization parameters
01998 
01999    // First step size
02000 
02001    file >> word;
02002 
02003    file >> firstStepSize;
02004 
02005    // Tolerance
02006 
02007    file >> word;
02008 
02009    file >> optimalStepSizeTolerance;
02010 
02011    // Stopping criteria: 
02012 
02013    // Evaluation goal
02014 
02015    file >> word;
02016 
02017    file >> evaluationGoal;
02018 
02019    // Norm of objective function gradient goal
02020 
02021    file >> word;
02022 
02023    file >> gradientNormGoal;
02024 
02025    // Maximum time
02026 
02027    file >> word;
02028 
02029    file >> maximumTime;
02030 
02031    // Maximum number of iterations
02032 
02033    file >> word;
02034 
02035    file >> maximumNumberOfIterations;
02036 
02037    // User stuff: 
02038 
02039    // Warning step size
02040 
02041    file >> word;
02042 
02043    file >> warningStepSize;
02044 
02045    // Iterations between showing progress
02046 
02047    file >> word;
02048 
02049    file >> showPeriod;
02050 
02051    // Close file
02052 
02053    file.close();
02054 }
02055 
02056 
02057 // void saveOptimizationHistory(char*) method
02058 
02059 /// This method saves the optimization history to a data file:
02060 ///
02061 /// <ol>
02062 /// <li> Iteration.
02063 /// <li> Objective function evaluation.
02064 /// <li> Objective function gradient norm.
02065 /// </ol>
02066 ///
02067 /// @param filename: Filename.
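///
/// The resulting file contains one row per iteration. The evaluation and
/// gradient norm entries below are placeholders, not real results:
///
/// \code
/// % Purple: An Open Source Numerical Optimization C++ Library.
/// % Conjugate Gradient Optimization History.
/// % 1 - Iteration.
/// % 2 - Objective function evaluation.
/// % 3 - Objective function gradient norm.
/// 0 <evaluation> <gradientNorm>
/// 1 <evaluation> <gradientNorm>
/// ...
/// \endcode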
02068 
02069 void ConjugateGradient::saveOptimizationHistory(char* filename)
02070 {
02071    std::fstream file;
02072 
02073    file.open(filename, std::ios::out);
02074 
02075    if(!file.is_open())
02076    {
02077       std::cout << std::endl 
02078                 << "Error: ConjugateGradient class. "
02079                 << "void saveOptimizationHistory(char*) method."
02080                 << std::endl
02081                 << "Cannot open optimization history data file." << std::endl
02082                 << std::endl;
02083 
02084       exit(1);
02085    }
02086    else
02087    {
02088       std::cout << std::endl 
02089                 << "Saving optimization history to data file..." << std::endl;
02090    }
02091 
02092    // Write file header
02093 
02094    file << "% Purple: An Open Source Numerical Optimization C++ Library." 
02095         << std::endl 
02096         << "% Conjugate Gradient Optimization History." << std::endl
02097         << "% 1 - Iteration." << std::endl
02098         << "% 2 - Objective function evaluation." << std::endl
02099         << "% 3 - Objective function gradient norm." << std::endl;
02100 
02101    // Write file data
02102 
02103    int size = evaluationHistory.getSize();
02104 
02105    for (int i = 0; i < size; i++)
02106    {
02107       file << i << " "
02108            << evaluationHistory[i] << " "
02109            << gradientNormHistory[i] << std::endl;
02110    }
02111 
02112    file << std::endl;
02113 
02114    file.close();
02115 }
02116 
02117 
02118 // double getMinimum(Vector<double>) method
02119 
02120 /// This method returns the minimum element in a Vector of double precision numbers.
02121 ///
02122 /// @param v: Vector of double precision numbers.
02123 ///
02124 /// @see getGoldenSectionOptimalStepSize(double, double, Vector<double>, Vector<double>)
02125 
02126 double ConjugateGradient::getMinimum(Vector<double> v)
02127 {
02128    int n = v.getSize();
02129 
02130    double minimum = v[0];
02131 
02132    for(int i = 1; i < n; i++)
02133    {
02134       if(v[i] < minimum)
02135       {
02136          minimum = v[i];
02137       }
02138    }
02139 
02140    return(minimum);
02141 }
02142 
02143 
02144 // double getMaximum(Vector<double>) method
02145 
02146 /// This method returns the maximum element in a Vector of double precision numbers.
02147 ///
02148 /// @param v: Vector of double precision numbers.
02149 ///
02150 /// @see getGoldenSectionOptimalStepSize(double, double, Vector<double>, Vector<double>)
02151 
02152 double ConjugateGradient::getMaximum(Vector<double> v)
02153 {
02154    int n = v.getSize();
02155 
02156    double maximum = v[0];
02157 
02158    for(int i = 1; i < n; i++)
02159    {
02160       if(v[i] > maximum)
02161       {
02162          maximum = v[i];
02163       }
02164    }
02165 
02166    return(maximum);
02167 }
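// Note: getMinimum(Vector<double>) and getMaximum(Vector<double>) above could
// also be written with the standard algorithms from <algorithm>, which this
// file already includes. The sketch below assumes Vector<double> stores its
// elements contiguously, which is an assumption about the container rather
// than something this file guarantees:
//
//    double minimum = *std::min_element(&v[0], &v[0] + v.getSize());
//    double maximum = *std::max_element(&v[0], &v[0] + v.getSize());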
02168 
02169 }
02170 
02171 
02172 // Purple: An Open Source Numerical Optimization C++ Library.
02173 // Copyright (C) 2006 Roberto Lopez
02174 //
02175 // This library is free software; you can redistribute it and/or
02176 // modify it under the terms of the GNU Lesser General Public
02177 // License as published by the Free Software Foundation; either
02178 // version 2.1 of the License, or any later version.
02179 //
02180 // This library is distributed in the hope that it will be useful,
02181 // but WITHOUT ANY WARRANTY; without even the implied warranty of
02182 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
02183 // Lesser General Public License for more details.
02184 
02185 // You should have received a copy of the GNU Lesser General Public
02186 // License along with this library; if not, write to the Free Software
02187 // Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
