GradientDescent.cpp

00001 /******************************************************************************/
00002 /*                                                                            */
00003 /*   G R A D I E N T   D E S C E N T   C L A S S                              */
00004 /*                                                                            */
00005 /*   Roberto Lopez                                                            */
00006 /*   International Center for Numerical Methods in Engineering (CIMNE)        */
00007 /*   Technical University of Catalonia (UPC)                                  */
00008 /*   Barcelona, Spain                                                         */
00009 /*   E-mail: rlopez@cimne.upc.edu                                             */
00010 /*                                                                            */
00011 /******************************************************************************/
00012 
00013 #include "GradientDescent.h"
00014 
00015 #include <iostream>
00016 #include <fstream>
00017 #include <stdlib.h>
00018 #include <math.h>
00019 #include <time.h>
#include <string>     // std::string is used in load(char*)
#include <algorithm>  // std::sort is used in getBrentMethodOptimalStepSize
#include <functional> // std::less is used with std::sort
00020 
00021 namespace Purple
00022 {
00023 
00024 // GENERAL CONSTRUCTOR 
00025 
00026 /// General constructor. It creates a gradient descent object
00027 /// associated with an objective function object.
00028 /// It also initializes the class members to their default values:
00029 ///
00030 /// Initial argument: Random point within the objective function domain.
00031 ///
00032 /// Optimization operators:
00033 /// <ul>
00034 /// <li> Optimal step size method = Brent method;
00035 /// </ul>
00036 ///
00037 /// Optimization parameters:
00038 /// <ul>
00039 /// <li> First step size: 1.0e-3.
00040 /// <li> Optimal step size tolerance: 1.0e-3.
00041 /// </ul>
00042 ///
00043 /// Stopping criteria:
00044 /// <ul> 
00045 /// <li> Evaluation goal: -1.0e69.
00046 /// <li> Gradient norm goal: 0.0.
00047 /// <li> Maximum time: 1.0e6.
00048 /// <li> Maximum number of iterations: 1000. 
00049 /// </ul> 
00050 ///  
00051 /// User stuff: 
00052 /// <ul>
00053 /// <li> Warning step size: 1000.
00054 /// <li> Show period: 25. 
00055 /// </ul>
00056 ///
00057 /// @param newObjectiveFunction: Pointer to an objective function object.
00058 ///
00059 /// @see ObjectiveFunction.
00060 /// @see OptimizationAlgorithm.
00061 
00062 GradientDescent::GradientDescent(ObjectiveFunction* newObjectiveFunction)
00063 : OptimizationAlgorithm(newObjectiveFunction)
00064 {
00065    // Optimization operators
00066 
00067    optimalStepSizeMethod = BrentMethod;
00068 
00069    // Optimization parameters
00070 
00071    int numberOfVariables = objectiveFunction->getNumberOfVariables();
00072 
00073    Vector<double> lowerBound = objectiveFunction->getLowerBound();
00074    Vector<double> upperBound = objectiveFunction->getUpperBound();
00075 
00076    Vector<double> newInitialArgument(numberOfVariables, 0.0);
00077 
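   // Each component of the initial argument is drawn uniformly from
   // [lowerBound, upperBound). Note that rand() is not seeded here; call
   // srand() beforehand if a different initial point is wanted on each run.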
00078    for(int i = 0; i < numberOfVariables; i++)
00079    {
00080       double random = (double)rand()/(RAND_MAX+1.0);
00081 
00082       newInitialArgument[i] 
00083       = lowerBound[i] + (upperBound[i] - lowerBound[i])*random;
00084    }
00085 
00086    initialArgument = newInitialArgument;
00087 
00088    firstStepSize = 1.0e-3;
00089    optimalStepSizeTolerance = 1.0e-3;
00090 
00091    // Stopping criteria
00092 
00093    evaluationGoal = -1.0e69;
00094    gradientNormGoal = 0.0;
00095    maximumTime = 1.0e6;
00096    maximumNumberOfIterations = 1000;
00097 
00098    // User stuff
00099 
00100    warningStepSize = 1000.0;
00101    showPeriod = 25;
00102 }
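
// A minimal usage sketch (illustrative; not part of the original file). It
// assumes a concrete ObjectiveFunction subclass (here a hypothetical
// DeJongFunction) is available:
//
//    DeJongFunction deJongFunction;
//
//    GradientDescent gradientDescent(&deJongFunction);
//
//    gradientDescent.setFirstStepSize(1.0e-2);
//    gradientDescent.setOptimalStepSizeTolerance(1.0e-6);
//    gradientDescent.setMaximumNumberOfIterations(500);
//
//    Vector<double> minimalArgument = gradientDescent.getMinimalArgument();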
00103 
00104 
00105 // DEFAULT CONSTRUCTOR
00106 
00107 /// Default constructor. It creates a gradient descent optimization algorithm object
00108 /// not associated with any objective function object.
00109 /// It also initializes the class members to their default values:
00110 ///
00111 /// Optimization operators:
00112 /// <ul>
00113 /// <li> Optimal step size method = Brent method;
00114 /// </ul>
00115 ///
00116 /// Optimization parameters:
00117 /// <ul>
00118 /// <li> First step size: 1.0e-3.
00119 /// <li> Optimal step size tolerance: 1.0e-3.
00120 /// </ul>
00121 ///
00122 /// Stopping criteria:
00123 /// <ul> 
00124 /// <li> Evaluation goal: -1.0e69.
00125 /// <li> Gradient norm goal: 0.0.
00126 /// <li> Maximum time: 1.0e6.
00127 /// <li> Maximum number of iterations: 1000. 
00128 /// </ul> 
00129 ///  
00130 /// User stuff: 
00131 /// <ul>
00132 /// <li> Warning step size: 1000.
00133 /// <li> Show period: 25. 
00134 /// </ul>
00135 ///
00136 /// @see OptimizationAlgorithm.
00137 
00138 GradientDescent::GradientDescent(void) : OptimizationAlgorithm()
00139 {
00140    // Optimization operators
00141 
00142    optimalStepSizeMethod = BrentMethod;
00143 
00144    // Optimization parameters
00145 
00146    firstStepSize = 1.0e-3;
00147    optimalStepSizeTolerance = 1.0e-3;
00148 
00149    // Stopping criteria
00150 
00151    evaluationGoal = -1.0e69;
00152    gradientNormGoal = 0.0;
00153    maximumTime = 1.0e6;
00154    maximumNumberOfIterations = 1000;
00155 
00156    // User stuff
00157 
00158    warningStepSize = 1000.0;
00159    showPeriod = 25;
00160 }
00161 
00162 
00163 // DESTRUCTOR
00164 
00165 /// Destructor.
00166 
00167 GradientDescent::~GradientDescent(void)
00168 {
00169 
00170 }
00171 
00172 
00173 // METHODS
00174 
00175 // OptimalStepSizeMethod getOptimalStepSizeMethod(void) method
00176 
00177 /// This method returns the optimal step size method used for optimization.
00178 ///
00179 /// @see getGoldenSectionOptimalStepSize(double, double, Vector<double>, Vector<double>).
00180 /// @see getBrentMethodOptimalStepSize(double, double, Vector<double>, Vector<double>).
00181 
00182 GradientDescent::OptimalStepSizeMethod
00183 GradientDescent::getOptimalStepSizeMethod(void)
00184 {
00185    return(optimalStepSizeMethod);
00186 }
00187 
00188 
00189 // Vector<double> getInitialArgument(void)
00190 
00191 /// This method returns the initial objective function argument to be used by 
00192 /// the gradient descent method for optimization. 
00193 
00194 Vector<double> GradientDescent::getInitialArgument(void)
00195 {
00196    return(initialArgument);
00197 }
00198 
00199 
00200 // double getGradientNormGoal(void) method
00201 
00202 /// This method returns the goal value for the norm of the objective function
00203 /// gradient.
00204 /// This is used as a stopping criterion when optimizing a function.
00205 
00206 double GradientDescent::getGradientNormGoal(void)
00207 {
00208    return(gradientNormGoal);
00209 }
00210 
00211 
00212 // int getMaximumNumberOfIterations(void) method
00213 
00214 /// This method returns the maximum number of iterations to be performed by the 
00215 /// gradient descent method during the optimization process. 
00216 /// This is used as a stopping criterion when optimizing an objective function.
00217 
00218 int GradientDescent::getMaximumNumberOfIterations(void)
00219 {
00220    return(maximumNumberOfIterations);
00221 }
00222 
00223 
00224 // double getFirstStepSize(void) method
00225 
00226 /// This method returns the initial step size for line search
00227 /// in the first iteration of the gradient descent.
00228 ///
00229 /// @see getGoldenSectionOptimalStepSize(double, double, Vector<double>, Vector<double>).
00230 /// @see getBrentMethodOptimalStepSize(double, double, Vector<double>, Vector<double>).
00231 
00232 double GradientDescent::getFirstStepSize(void)
00233 {
00234    return(firstStepSize);
00235 }
00236 
00237 
00238 // double getOptimalStepSizeTolerance(void) method
00239 
00240 /// This method returns the tolerance value in line search.
00241 ///
00242 /// @see getGoldenSectionOptimalStepSize(double, double, Vector<double>, Vector<double>).
00243 /// @see getBrentMethodOptimalStepSize(double, double, Vector<double>, Vector<double>).
00244 
00245 double GradientDescent::getOptimalStepSizeTolerance(void)
00246 {
00247    return(optimalStepSizeTolerance);
00248 }
00249 
00250 
00251 // double getWarningStepSize(void) method
00252 
00253 /// This method returns the step size value at which a warning message is
00254 /// written to the screen during line search.
00255 ///
00256 /// @see getGoldenSectionOptimalStepSize(double, double, Vector<double>, Vector<double>).
00257 /// @see getBrentMethodOptimalStepSize(double, double, Vector<double>, Vector<double>).
00258 
00259 double GradientDescent::getWarningStepSize(void)
00260 {
00261    return(warningStepSize);
00262 }
00263 
00264 
00265 // int getShowPeriod(void) method
00266 
00267 /// This method returns the number of iterations between two successive
00268 /// outputs of optimization progress to the screen.
00269 
00270 int GradientDescent::getShowPeriod(void)
00271 {
00272    return(showPeriod);    
00273 }
00274 
00275 
00276 // void setOptimalStepSizeMethod(OptimalStepSizeMethod) method
00277 
00278 /// This method sets a new optimal step size method to be used for optimization
00279 /// with the gradient descent method.
00280 ///
00281 /// @param newOptimalStepSizeMethod: Optimal step size method.
00282 ///
00283 /// @see getGoldenSectionOptimalStepSize(double, double, Vector<double>, Vector<double>).
00284 /// @see getBrentMethodOptimalStepSize(double, double, Vector<double>, Vector<double>).
00285 
00286 void GradientDescent::setOptimalStepSizeMethod
00287 (GradientDescent::OptimalStepSizeMethod newOptimalStepSizeMethod)
00288 {
00289    optimalStepSizeMethod = newOptimalStepSizeMethod;
00290 }
00291 
00292 
00293 // void setInitialArgument(Vector<double>) method
00294 
00295 /// This method sets a new initial objective function argument to be used by 
00296 /// the gradient descent method for optimization. 
00297 ///
00298 /// @param newInitialArgument: Initial argument Vector.
00299 
00300 void GradientDescent::setInitialArgument(Vector<double> newInitialArgument)
00301 {
00302    int size = newInitialArgument.getSize();
00303 
00304    int numberOfVariables = objectiveFunction->getNumberOfVariables();
00305 
00306    if(size != numberOfVariables)
00307    {
00308       std::cout << std::endl
00309                 << "Error: GradientDescent class. "
00310                 << "double setInitialArgument(Vector<double>) method." << std::endl
00311                 << "Size of initial argument must be equal to number of variables."
00312                 << std::endl << std::endl;
00313 
00314       exit(1);
00315    }
00316 
00317 
00318    initialArgument = newInitialArgument;
00319 }
00320 
00321 
00322 // void setGradientNormGoal(double) method
00323 
00324 /// This method sets a new goal value for the norm of the 
00325 /// objective function gradient. 
00326 /// This is used as a stopping criterion when optimizing an objective function.
00327 ///
00328 /// @param newGradientNormGoal: 
00329 /// Goal value for the norm of the objective function gradient.
00330 
00331 void GradientDescent::setGradientNormGoal(double newGradientNormGoal)
00332 {
00333    if(newGradientNormGoal < 0.0)
00334    {
00335       std::cout << std::endl
00336                 << "Error: GradientDescent class. " << std::endl
00337                 << "void setGradientNormGoal(double) method."
00338                 << std::endl
00339                 << "Gradient norm goal must be equal or greater than 0."
00340                 << std::endl << std::endl;
00341       exit(1);
00342    }
00343 
00344    // Set gradient norm goal
00345 
00346    gradientNormGoal = newGradientNormGoal;
00347 }
00348 
00349 
00350 // void setMaximumNumberOfIterations(int) method
00351 
00352 /// This method sets a new maximum number of iterations in the optimization 
00353 /// process. 
00354 ///
00355 /// @param newMaximumNumberOfIterations: Maximum number of iterations.
00356 
00357 void GradientDescent
00358 ::setMaximumNumberOfIterations(int newMaximumNumberOfIterations)
00359 {
00360    if(newMaximumNumberOfIterations <= 0)
00361    {
00362       std::cout << std::endl
00363                 << "Error: GradientDescent class." << std::endl
00364                 << "void setMaximumNumberOfIterations(int) method." << std::endl
00365                 << "Maximum number of iterations must be greater than 0."
00366                 << std::endl
00367                 << std::endl;
00368 
00369       exit(1);
00370    }
00371 
00372    // Set maximum number of iterations
00373 
00374    maximumNumberOfIterations = newMaximumNumberOfIterations;
00375 }
00376 
00377 
00378 // void setShowPeriod(int) method
00379 
00380 /// This method sets a new number of iterations between two successive
00381 /// outputs of optimization progress to the screen.
00382 ///
00383 /// @param newShowPeriod: Show period.
00384 
00385 void GradientDescent::setShowPeriod(int newShowPeriod)
00386 {
00387    if(newShowPeriod <= 0)
00388    {
00389       std::cout << std::endl
00390                 << "Error: GradientDescent class. "  << std::endl
00391                 << "void setShowPeriod(int) method." << std::endl
00392                 << "Show period must be greater than 0."
00393                 << std::endl << std::endl;
00394 
00395       exit(1);
00396    }
00397    
00398    // Set show period
00399    
00400    showPeriod = newShowPeriod;
00401 }
00402 
00403 
00404 // void setFirstStepSize(double) method
00405 
00406 /// This method sets a new initial step size for line search
00407 /// in the first iteration of the gradient descent.
00408 ///
00409 /// @param newFirstStepSize: First step size.
00410 ///
00411 /// @see getGoldenSectionOptimalStepSize(double, double, Vector<double>, Vector<double>).
00412 /// @see getBrentMethodOptimalStepSize(double, double, Vector<double>, Vector<double>).
00413 
00414 void GradientDescent::setFirstStepSize(double newFirstStepSize)
00415 {
00416    if(newFirstStepSize <= 0.0)
00417    {
00418       std::cout << std::endl
00419                 << "Error: GradientDescent class. void setFirstStepSize(double) method."
00420                 << std::endl
00421                 << "First step size must be greater than 0." << std::endl
00422                 << std::endl;
00423 
00424       exit(1);
00425    }
00426    else
00427    {
00428       firstStepSize = newFirstStepSize;
00429    }
00430 }
00431 
00432 
00433 // void setOptimalStepSizeTolerance(double) method
00434 
00435 /// This method sets a new tolerance value to be used in line minimization.
00436 ///
00437 /// @param newOptimalStepSizeTolerance: Line search tolerance.
00438 ///
00439 /// @see getGoldenSectionOptimalStepSize(double, double, Vector<double>, Vector<double>).
00440 /// @see getBrentMethodOptimalStepSize(double, double, Vector<double>, Vector<double>).
00441 
00442 void GradientDescent
00443 ::setOptimalStepSizeTolerance(double newOptimalStepSizeTolerance)
00444 {
00445    if(newOptimalStepSizeTolerance <= 0.0)
00446    {
00447       std::cout << std::endl
00448                 << "Error: GradientDescent class. "
00449                 << "void setOptimalStepSizeTolerance(double) method."
00450                 << std::endl
00451                 << "Tolerance must be greater than 0." << std::endl
00452                 << std::endl;
00453 
00454       exit(1);
00455    }
00456    else
00457    {
00458       optimalStepSizeTolerance = newOptimalStepSizeTolerance;
00459    }
00460 }
00461 
00462 
00463 // void setWarningStepSize(double) method
00464 
00465 /// This method sets a new step size value at which a warning message is written
00466 /// to the screen during line minimization.
00467 ///
00468 /// @param newWarningStepSize: Warning step size value.
00469 ///
00470 /// @see getGoldenSectionOptimalStepSize(double, double, Vector<double>, Vector<double>).
00471 /// @see getBrentMethodOptimalStepSize(double, double, Vector<double>, Vector<double>).
00472 
00473 void GradientDescent::setWarningStepSize(double newWarningStepSize)
00474 {
00475    if(newWarningStepSize <= 0.0)
00476    {
00477       std::cout << std::endl
00478                 << "Error: GradientDescent class. void setWarningStepSize(double) method."
00479                 << std::endl
00480                 << "Warning step size must be greater than 0." << std::endl
00481                 << std::endl;
00482 
00483       exit(1);
00484    }
00485    else
00486    {
00487       warningStepSize = newWarningStepSize;
00488    }
00489 }
00490 
00491 
00492 // double getGoldenSectionOptimalStepSize(double, double, Vector<double>, Vector<double>)
00493 // method
00494 
00495 
00496 /// This method returns the optimal step size by searching in a given
00497 /// direction to locate the minimum of the objective function in that
00498 /// direction. It uses the golden section method.
00499 ///
00500 /// @param initialStepSize: Initial step size in line search.
00501 /// @param evaluation: Objective function evaluation value.
00502 /// @param argument: Objective function argument Vector.
00503 /// @param searchDirection: Search direction Vector.
00504 ///
00505 /// @see getBrentMethodOptimalStepSize(double, double, Vector<double>, Vector<double>).
00506 
00507 double GradientDescent::getGoldenSectionOptimalStepSize
00508 (double initialStepSize, double evaluation, Vector<double> argument,
00509 Vector<double> searchDirection)
00510 {
00511    double optimalStepSize = 0.0;
00512 
00513    int numberOfVariables = objectiveFunction->getNumberOfVariables();
00514 
00515    Vector<double> potentialArgument(numberOfVariables, 0.0);
00516 
00517    double a = 0.0;
00518    double evaluationA = 0.0;
00519    double b = 0.0;
00520    double evaluationB = 0.0;
00521 
00522    double c = 0.0;      
00523    double evaluationC = 0.0;
00524    double d = 0.0;      
00525    double evaluationD = 0.0;
00526 
00527    double tau = (3.0-sqrt(5.0))/2.0; // 0.382
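   // Note: tau = (3 - sqrt(5))/2 = 2 - phi, where phi = (1 + sqrt(5))/2 is
   // the golden ratio. Placing the interior points at c = a + tau*(b-a) and
   // d = b - tau*(b-a) keeps their relative positions fixed, so each
   // iteration shrinks the bracketing interval by the constant factor
   // 1 - tau (approximately 0.618).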
00528 
00529    // Start golden section search
00530 
00531    // Set initial a point
00532 
00533    a = 0.0;
00534 
00535    // Calculate evaluation for a
00536 
00537    evaluationA = evaluation;
00538 
00539    // Set initial b point
00540 
00541    b = initialStepSize;
00542 
00543    // Calculate evaluation for b
00544 
00545    for (int i = 0; i < numberOfVariables; i++)
00546    {
00547       potentialArgument[i] = argument[i] + searchDirection[i]*b;
00548    }
00549 
00550    evaluationB = objectiveFunction->getEvaluation(potentialArgument);
00551 
00552    // Find initial interval where minimum evaluation occurs
00553 
00554    while(evaluationA > evaluationB)
00555    {
00556       // Set new b
00557 
00558       b = 2.0*b;
00559 
00560       if(b >= warningStepSize)
00561       {
00562          std::cout << std::endl
00563                    << "Warning: Step size is " << b
00564                    << std::endl;
00565       }
00566 
00567       // Calculate evaluation for new b
00568 
00569       for (int i = 0; i < numberOfVariables; i++)
00570       {
00571          potentialArgument[i] = argument[i] + searchDirection[i]*b;
00572       }
00573 
00574       evaluationB = objectiveFunction->getEvaluation(potentialArgument);
00575    }
00576 
00577    // Initialize c and d (interior points for line minimization)
00578 
00579    // Initialize c point
00580 
00581    c = a + tau*(b-a);
00582 
00583    // Calculate evaluation for c
00584 
00585    for (int i = 0; i < numberOfVariables; i++)
00586    {
00587       potentialArgument[i] = argument[i] + searchDirection[i]*c;
00588    }
00589 
00590    evaluationC = objectiveFunction->getEvaluation(potentialArgument);
00591 
00592    // Initialize d point
00593 
00594    d = b - tau*(b-a);
00595 
00596    // Calculate evaluation for d
00597 
00598    for (int i = 0; i < numberOfVariables; i++)
00599    {
00600       potentialArgument[i] = argument[i] + searchDirection[i]*d;
00601    }
00602 
00603    evaluationD = objectiveFunction->getEvaluation(potentialArgument);
00604 
00605    // Reduce the interval with the golden section algorithm
00606 
00607    while(b-a > optimalStepSizeTolerance)
00608    {
00609       Vector<double> evaluationVectorLeft(3, 0.0);
00610       evaluationVectorLeft[0] = evaluationA;
00611       evaluationVectorLeft[1] = evaluationC;
00612       evaluationVectorLeft[2] = evaluationD;
00613 
00614       double minimumEvaluationLeft = getMinimum(evaluationVectorLeft);
00615 
00616       Vector<double> evaluationVectorRight(3, 0.0);
00617       evaluationVectorRight[0] = evaluationB;
00618       evaluationVectorRight[1] = evaluationC;
00619       evaluationVectorRight[2] = evaluationD;
00620 
00621       double minimumEvaluationRight = getMinimum(evaluationVectorRight);
00622 
00623       if((evaluationC <= evaluationD && evaluationB >= minimumEvaluationLeft)
00624       || (evaluationA <=  minimumEvaluationRight))
00625 
00626       // There is a minimum between a and b
00627       {
00628          b=d; 
00629          d=c; 
00630 
00631          evaluationB = evaluationD;
00632          evaluationD = evaluationC;
00633 
00634          // Set new c point
00635 
00636          c = a + tau*(b-a);
00637 
00638          // Calculate evaluation for new c
00639 
00640          for (int i = 0; i < numberOfVariables; i++)
00641          {
00642             potentialArgument[i] = argument[i] + searchDirection[i]*c;
00643          }
00644 
00645          evaluationC = objectiveFunction->getEvaluation(potentialArgument);
00646       }
00647       else if((evaluationD <= evaluationC && evaluationA >= minimumEvaluationRight)
00648       || (evaluationB <= minimumEvaluationLeft))
00649 
00650       // There is a minimum between c and b
00651       {
00652          a = c; 
00653          c = d; 
00654 
00655          evaluationA = evaluationC;
00656          evaluationC = evaluationD;
00657 
00658          // Set new d point
00659 
00660          d = b - tau*(b-a);
00661 
00662          // Calculate evaluation for new d
00663 
00664          for (int i = 0; i < numberOfVariables; i++)
00665          {
00666             potentialArgument[i] = argument[i] + searchDirection[i]*d;
00667          }
00668 
00669          evaluationD = objectiveFunction->getEvaluation(potentialArgument);
00670       }
00671       else
00672       {
00673          std::cout << std::endl
00674                    << "Error: GradientDescent class." << std::endl
00675                    << "double getGoldenSectionOptimalStepSize "
00676                    << "(double, double, Vector<double>, Vector<double>) method."
00677                    << std::endl
00678                    << "Unable to find where the minimum is." << std::endl;
00679 
00680          exit(1);
00681       }
00682    }
00683 
00684    // Get minimum evaluation and optimal step size among points A, B, C and D
00685 
00686    double minimumEvaluation = evaluation;
00687 
00688    if(evaluationA < minimumEvaluation)
00689    {
00690       minimumEvaluation = evaluationA;
00691       optimalStepSize = a;
00692    }
00693    if(evaluationB < minimumEvaluation)
00694    {
00695       minimumEvaluation = evaluationB;
00696       optimalStepSize = b;
00697    }
00698    if(evaluationC < minimumEvaluation)
00699    {
00700       minimumEvaluation = evaluationC;
00701       optimalStepSize = c;
00702    }
00703    if(evaluationD < minimumEvaluation)
00704    {
00705       minimumEvaluation = evaluationD;
00706       optimalStepSize = d;
00707    }
00708 
00709    return(optimalStepSize);
00710 }
00711 
00712 
00713 // double getBrentMethodOptimalStepSize(double, double, Vector<double>, Vector<double>)
00714 // method
00715 
00716 /// This method returns the optimal step size by searching in a given
00717 /// direction to locate the minimum of the objective function in that
00718 /// direction. It uses Brent's method.
00719 ///
00720 /// @param initialStepSize: Initial step size in line search.
00721 /// @param evaluation: Objective function evaluation value.
00722 /// @param argument: Objective function argument Vector.
00723 /// @param searchDirection: Search direction Vector.
00724 ///
00725 /// @see getGoldenSectionOptimalStepSize(double, double, Vector<double>, Vector<double>).
00726 
00727 double GradientDescent::getBrentMethodOptimalStepSize
00728 (double initialStepSize, double evaluation, Vector<double> argument,
00729 Vector<double> searchDirection)
00730 {
00731    double optimalStepSize = 0.0;
00732 
00733    int numberOfVariables = objectiveFunction->getNumberOfVariables();
00734 
00735    Vector<double> potentialArgument(numberOfVariables, 0.0);
00736 
00737    double a = 0.0;
00738    double evaluationA = 0.0;
00739    double b = 0.0;
00740    double evaluationB = 0.0;
00741 
00742    double u = 0.0;
00743    double evaluationU = 0.0;
00744    double v = 0.0;
00745    double evaluationV = 0.0;
00746    double w = 0.0;
00747    double evaluationW = 0.0;
00748    double x = 0.0;
00749    double evaluationX = 0.0;
00750 
00751    double tau = (3.0-sqrt(5.0))/2.0; // 0.382
00752 
00753    // Set initial a point
00754 
00755    a = 0.0;
00756 
00757    // Calculate evaluation for a
00758 
00759    evaluationA = evaluation;
00760 
00761    // Set initial b point
00762 
00763    b = initialStepSize;
00764 
00765    // Calculate evaluation for b
00766 
00767    for (int i = 0; i < numberOfVariables; i++)
00768    {
00769       potentialArgument[i] = argument[i] + searchDirection[i]*b;
00770    }
00771 
00772    evaluationB = objectiveFunction->getEvaluation(potentialArgument);
00773 
00774    // Find initial interval where minimum evaluation occurs
00775 
00776    while(evaluationA > evaluationB)
00777    {
00778       // Set new b
00779 
00780       b = 2.0*b;
00781 
00782       if(b >= warningStepSize)
00783       {
00784          std::cout << std::endl
00785                    << "Warning: Step size is " << b
00786                    << std::endl;
00787       }
00788 
00789       // Calculate evaluation for new b
00790 
00791       for (int i = 0; i < numberOfVariables; i++)
00792       {
00793          potentialArgument[i] = argument[i] + searchDirection[i]*b;
00794       }
00795 
00796       evaluationB = objectiveFunction->getEvaluation(potentialArgument);
00797    }
00798 
00799    // Get intermediate point V
00800 
00801    v = a + tau*(b-a);
00802 
00803    // Calculate evaluation for V
00804 
00805    for (int i = 0; i < numberOfVariables; i++)
00806    {
00807       potentialArgument[i] = argument[i] + searchDirection[i]*v;
00808    }
00809 
00810    evaluationV = objectiveFunction->getEvaluation(potentialArgument);
00811 
00812    // Set initial W and X points
00813 
00814    w = v;
00815    evaluationW = evaluationV;
00816 
00817    x = v;
00818    evaluationX = evaluationV;
00819 
00820    // Flag indicating whether to fall back to a golden section step
00821 
00822    bool goldenSection = false;
00825 
00826    // Reduce the interval
00827 
00828    while(b-a > optimalStepSizeTolerance)
00829    {
00830       // Quadratic interpolation
00831 
00832       if(w != x && w != v && x != v) // Can construct parabola
00833       {
00834          // Step size vector (sorted in ascending order below)
00835 
00836          Vector<double> stepSizeVector(3, 0.0);
00837          stepSizeVector[0] = v;
00838          stepSizeVector[1] = w;
00839          stepSizeVector[2] = x;
00840 
00841          std::sort(stepSizeVector.begin(), stepSizeVector.end(), std::less<double>());
00842 
00843          // Evaluation vector, matched to the sorted step sizes
00844 
00845          Vector<double> evaluationVector(3, 0.0);
00846 
00847          for(int i = 0; i < 3; i++)
00848          {
00849             if(stepSizeVector[i] == v)
00850             {
00851                evaluationVector[i] = evaluationV;
00852             }
00853             else if(stepSizeVector[i] == w)
00854             {
00855                evaluationVector[i] = evaluationW;
00856             }
00857             else if(stepSizeVector[i] == x)
00858             {
00859                evaluationVector[i] = evaluationX;
00860             }
00861             else
00862             {
00863                std::cout << std::endl
00864                          << "Error: GradientDescent class." << std::endl
00865                          << "double getBrentMethodOptimalStepSize" << std::endl
00866                          << "(double, double, Vector<double>, Vector<double>) method."
00867                          << std::endl
00868                          << "Unable to construct step size and evaluation vectors correctly."
00869                          << std::endl << std::endl;
00870 
00871                exit(1);
00872             }
00873          }
00874 
00875          // xStar is the minimum of the parabola through the three step size points
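         //
         // For three points (z0,p0), (z1,p1), (z2,p2) with z0 < z1 < z2, the
         // parabola vertex is, in closed form,
         //
         //    x* = (1/2) * [ (z2^2 - z1^2)*p0 + (z1^2 - z0^2)*p2 + (z0^2 - z2^2)*p1 ]
         //               / [ (z2 - z1)*p0 + (z1 - z0)*p2 + (z0 - z2)*p1 ],
         //
         // which is what the numerator and denominator below compute.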
00876 
00877          double numerator
00878          = (pow(stepSizeVector[2],2) - pow(stepSizeVector[1],2))*evaluationVector[0]
00879          + (pow(stepSizeVector[1],2) - pow(stepSizeVector[0],2))*evaluationVector[2]
00880          + (pow(stepSizeVector[0],2) - pow(stepSizeVector[2],2))*evaluationVector[1];
00881 
00882          double denominator
00883          = (stepSizeVector[2] - stepSizeVector[1])*evaluationVector[0]
00884          + (stepSizeVector[1] - stepSizeVector[0])*evaluationVector[2]
00885          + (stepSizeVector[0] - stepSizeVector[2])*evaluationVector[1];
00886 
00887          double xStar = 0.5*numerator/denominator;
00888 
00889          if(xStar < b && a < xStar) // xStar is in [a,b]
00890          {
00891             u = xStar;
00892 
00893             // Good, no need to perform golden section
00894 
00895             goldenSection = false;
00896          }
00897          else // xStar is not in [a,b]
00898          {
00899             // Bad, need to perform golden section
00900 
00901             goldenSection = true;
00902          }
00903       }
00904       else // Cannot construct parabola
00905       {
00906          // Bad, need to perform golden section
00907 
00908          goldenSection = true;
00909       }
00910 
00911       //
00912       // Golden section
00913       //
00914 
00915       if(goldenSection == true)
00916       {
00917          if(x >= (a+b)/2.0)
00918          {
00919             u = x-tau*(x-a);
00920          }
00921          else
00922          {
00923             u = x+tau*(b-x);
00924          }
00925       }
00926 
00927       // Calculate evaluation for U
00928 
00929       for (int i = 0; i < numberOfVariables; i++)
00930       {
00931          potentialArgument[i] = argument[i] + searchDirection[i]*u;
00932       }
00933 
00934       evaluationU = objectiveFunction->getEvaluation(potentialArgument);
00935 
00936       // Update points
00937 
00938       if(evaluationU <= evaluationX)
00939       {
00940          if(u < x)
00941          {
00942             b = x;
00943             evaluationB = evaluationX;
00944          }
00945          else
00946          {
00947             a = x;
00948             evaluationA = evaluationX;
00949          }
00950 
00951          v = w;
00952          evaluationV = evaluationW;
00953 
00954          w = x;
00955          evaluationW = evaluationX;
00956 
00957          x = u;
00958          evaluationX = evaluationU;
00959       }
00960       else
00961       {
00962          if(u < x)
00963          {
00964             a = u;
00965             evaluationA = evaluationU;
00966          }
00967          else
00968          {
00969             b = u;
00970             evaluationB = evaluationU;
00971          }
00972 
00973          if((evaluationU <= evaluationW) || (w == x))
00974          {
00975              v = w;
00976              evaluationV = evaluationW;
00977 
00978              w = u;
00979              evaluationW = evaluationU;
00980          }
00981          else if((evaluationU <= evaluationV) || (v == x) || (v == w))
00982          {
00983             v = u;
00984             evaluationV = evaluationU;
00985          }
00986       }
00987    } // while loop
00988 
00989    // Get minimum evaluation and optimal step size among points A, B, V, W and X
00990 
00991    double minimum = evaluation;
00992 
00993    if(evaluationA <= minimum)
00994    {
00995       minimum = evaluationA;
00996       optimalStepSize = a;
00997    }
00998    if(evaluationB <= minimum)
00999    {
01000       minimum = evaluationB;
01001       optimalStepSize = b;
01002    }
01003    if(evaluationV <= minimum)
01004    {
01005       minimum = evaluationV;
01006       optimalStepSize = v;
01007    }
01008    if(evaluationW <= minimum)
01009    {
01010       minimum = evaluationW;
01011       optimalStepSize = w;
01012    }
01013    if(evaluationX <= minimum)
01014    {
01015       minimum = evaluationX;
01016       optimalStepSize = x;
01017    }
01018 
01019    return(optimalStepSize);
01020 }
01021 
01022 
01023 // Vector<double> getMinimalArgument(void) method
01024 
01025 /// This method optimizes an objective function according to the 
01026 /// gradient descent algorithm. 
01027 /// It returns the minimal argument of the objective function.
01028 /// Optimization occurs according to the optimization operators and the 
01029 /// optimization parameters.
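///
/// Each iteration performs the update
///
///    x(k+1) = x(k) + eta(k)*d(k),   with   d(k) = -gradient(x(k)),
///
/// where the step size eta(k) is found by a one-dimensional line search
/// (golden section or Brent's method) along d(k).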
01030 
01031 Vector<double> GradientDescent::getMinimalArgument(void)
01032 {
01033    int numberOfVariables = objectiveFunction->getNumberOfVariables();
01034 
01035    Vector<double> minimalArgument(numberOfVariables, 0.0);
01036    Vector<double> argument(numberOfVariables, 0.0);
01037 
01038    // Evaluation history vector
01039 
01040    Vector<double> newEvaluationHistory(maximumNumberOfIterations+1, 0.0);
01041 
01042    evaluationHistory = newEvaluationHistory;
01043 
01044    // Gradient norm history vector
01045 
01046    Vector<double> newGradientNormHistory(maximumNumberOfIterations+1, 0.0);
01047 
01048    gradientNormHistory = newGradientNormHistory;
01049 
01050    double evaluation = 0.0;
01051 
01052    Vector<double> gradient(numberOfVariables, 0.0);
01053 
01054    double gradientNorm = 0.0;
01055 
01056    Vector<double> searchDirection(numberOfVariables, 0.0);
01057 
01058    double initialStepSize = 0.0;
01059    double optimalStepSize = 0.0;
01060    double oldOptimalStepSize = 0.0;
01061 
01062    time_t beginningTime, currentTime;
01063 
01064    double elapsedTime = 0.0;
01065 
01066    // Set beginning time 
01067 
01068    time(&beginningTime);
01069 
01070    std::cout << std::endl
01071              << "Getting minimal argument with gradient descent..." 
01072              << std::endl;
01073 
01074  
01075    argument = initialArgument;
01076     
01077    // Initial objective function evaluation
01078    
01079    evaluation = objectiveFunction->getEvaluation(argument);
01080 
01081    evaluationHistory[0] = evaluation;
01082 
01083    if(evaluation <= evaluationGoal)
01084    {          
01085       std::cout << std::endl
01086                 << "Initial evaluation is less than goal." << std::endl
01087                 << "Initial evaluation: " << evaluation << std::endl;
01088       
01089       minimalArgument = argument;
01090 
01091       // Print minimal argument to screen
01092 
01093       std::cout << std::endl
01094                 << "Minimal argument:" << std::endl;
01095    
01096       for(int i = 0; i < numberOfVariables; i++)
01097       {
01098          std::cout << minimalArgument[i] << " ";        
01099       }
01100       
01101       return(minimalArgument);        
01102    }
01103    else
01104    {
01105       std::cout << "Initial evaluation: " <<  evaluation << std::endl;      
01106    }
01107 
01108    // Initial objective function gradient
01109 
01110    gradient = objectiveFunction->getGradient(argument);
01111 
01112    gradientNorm = objectiveFunction->getGradientNorm(gradient);
01113 
01114    gradientNormHistory[0] = gradientNorm;
01115 
01116    if(gradientNorm <= gradientNormGoal)
01117    {          
01118       std::cout << std::endl
01119                 << "Initial gradient norm is less than goal." << std::endl
01120                 << "Initial gradient norm: " << gradientNorm << std::endl;
01121               
01122       minimalArgument = argument;
01123      
01124       // Print minimal argument to screen
01125 
01126       std::cout << std::endl
01127                 << "Minimal argument:" << std::endl;
01128    
01129       for(int i = 0; i < numberOfVariables; i++)
01130       {
01131          std::cout << minimalArgument[i] << " ";        
01132       }
01133       
01134       return(minimalArgument);        
01135    }
01136    else
01137    {
01138       std::cout << "Initial gradient norm: " <<  gradientNorm << std::endl;      
01139    }
01140 
01141    // Main loop
01142 
01143    for(int iteration = 1; iteration <= maximumNumberOfIterations; iteration++)
01144    {      
01145       // Get search direction
01146 
01147       for(int i = 0; i < numberOfVariables; i++)
01148       {
01149          searchDirection[i] = -1.0*gradient[i];
01150       }
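
      // (this is steepest descent: the search direction is the negative
      // gradient of the objective function)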
01151 
01152       // Get optimal step size
01153 
01154       if(iteration == 1)
01155       {
01156          initialStepSize = firstStepSize;
01157       }
01158       else
01159       {
01160          initialStepSize = oldOptimalStepSize;
01161       }
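
      // (starting the line search from the previous optimal step size
      // warm-starts the bracketing phase)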
01162 
01163       switch(optimalStepSizeMethod)
01164       {
01165          case GoldenSection:
01166 
01167             optimalStepSize = getGoldenSectionOptimalStepSize
01168             (initialStepSize, evaluation, argument, searchDirection);
01169 
01170 
01171             break;
01172 
01173          case BrentMethod:
01174 
01175             optimalStepSize = getBrentMethodOptimalStepSize
01176             (initialStepSize, evaluation, argument, searchDirection);
01177 
01178             break;
01179       }
01180 
01181       // Optimal step size stopping criterion
01182 
01183       if(optimalStepSize == 0.0)
01184       {
01185          std::cout << std::endl
01186                    << "Iteration " << iteration << ": "
01187                    << "Optimal step size is zero." << std::endl;
01188 
01189          std::cout << "Final evaluation: " << evaluation << ";" << std::endl;
01190          std::cout << "Final gradient norm: " << gradientNorm << ";" << std::endl;
01191 
01192          break;
01193       }
01194 
01195       // Get new free parameters
01196 
01197       for (int i = 0; i < numberOfVariables; i++)
01198       {
01199          argument[i] += optimalStepSize*searchDirection[i];
01200       }
01201 
01202       // Objective function evaluation
01203    
01204       evaluation = objectiveFunction->getEvaluation(argument);
01205 
01206       evaluationHistory[iteration] = evaluation;
01207 
01208       // Objective function gradient
01209 
01210       gradient = objectiveFunction->getGradient(argument);
01211 
01212       gradientNorm = objectiveFunction->getGradientNorm(gradient);
01213 
01214       gradientNormHistory[iteration] = gradientNorm;
01215 
01216       // Stopping Criteria
01217 
01218       // Evaluation goal 
01219 
01220       if (evaluation <= evaluationGoal)
01221       {
01222             std::cout << std::endl
01223                       << "Iteration " << iteration << ": "
01224                       << "Evaluation goal reached." << std::endl;
01225 
01226             std::cout << "Evaluation: " << evaluation << ";" << std::endl;
01227             std::cout << "Gradient norm: " << gradientNorm << ";" << std::endl;
01228 
01229             break;
01230       }
01231 
01232       // Norm of objective function gradient goal 
01233 
01234       if (gradientNorm <= gradientNormGoal)
01235       {
01236          std::cout << std::endl
01237                    << "Iteration " << iteration << ": "
01238                    << "Gradient norm goal reached."
01239                    << std::endl;  
01240 
01241          std::cout << "Evaluation: " << evaluation << ";" << std::endl;
01242          std::cout << "Gradient norm: " << gradientNorm << ";" << std::endl;
01243 
01244          break;
01245       }
01246 
01247       // Maximum optimization time
01248 
01249       time(&currentTime);
01250 
01251       elapsedTime = difftime(currentTime, beginningTime);
01252 
01253       if (elapsedTime >= maximumTime)
01254       {
01255          std::cout << std::endl
01256                    << "Iteration " << iteration << ": "
01257                    << "Maximum optimization time reached."
01258                    << std::endl;
01259 
01260          std::cout << "Evaluation: " << evaluation << ";" << std::endl;
01261          std::cout << "Gradient norm: " << gradientNorm << ";" << std::endl;
01262 
01263          break;
01264       }
01265 
01266       // Maximum number of iterations
01267 
01268       if (iteration == maximumNumberOfIterations)
01269       {
01270          std::cout << std::endl
01271                    << "Iteration " << iteration << ": "
01272                    << "Maximum number of iterations reached."
01273                    << std::endl;
01274 
01275          std::cout << "Evaluation: " << evaluation << ";" << std::endl;
01276          std::cout << "Gradient norm: " << gradientNorm << ";" << std::endl;
01277 
01278          break;
01279       }
01280 
01281       // Progress
01282 
01283       if(iteration % showPeriod == 0)
01284       {
01285          std::cout << std::endl
01286                    << "Iteration " << iteration << "; " << std::endl;
01287 
01288          std::cout << "Evaluation: " << evaluation << ";" << std::endl;
01289          std::cout << "Gradient norm: " << gradientNorm << ";" << std::endl;
01290       }
01291 
01292       // Update
01293 
01294       oldOptimalStepSize = optimalStepSize;
01295    }
01296 
01297    // Set minimal argument
01298 
01299    minimalArgument = argument;
01300 
01301    // Print minimal argument to screen
01302 
01303    std::cout << std::endl
01304              << "Minimal argument:" << std::endl;
01305    
01306    for(int i = 0; i < numberOfVariables; i++)
01307    {
01308       std::cout << minimalArgument[i] << " ";        
01309    }
01310 
01311    std::cout << std::endl;
01312    
01313    return(minimalArgument);
01314 }
01315 
01316 
01317 // void print(void) method
01318 
01319 /// This method prints to the screen the optimization operators and
01320 /// the optimization parameters concerning the gradient descent object:
01321 ///
01322 /// Initial argument.
01323 ///
01324 /// Optimization operators:
01325 /// <ul>
01326 /// <li> Optimal step size method.
01327 /// </ul>
01328 ///
01329 /// Optimization parameters:
01330 /// <ul>
01331 /// <li> First step size.
01332 /// <li> Optimal step size tolerance.
01333 /// </ul>
01334 ///
01335 /// Stopping criteria:
01336 /// <ul> 
01337 /// <li> Evaluation goal.
01338 /// <li> Gradient norm goal.
01339 /// <li> Maximum time.
01340 /// <li> Maximum number of iterations. 
01341 /// </ul> 
01342 ///  
01343 /// User stuff: 
01344 /// <ul>
01345 /// <li> Warning step size.
01346 /// <li> Show period. 
01347 /// </ul>
01348 
01349 void GradientDescent::print(void)
01350 {
01351    std::cout << std::endl
01352              << "Gradient Descent Object." << std::endl;
01353 
01354    int numberOfVariables = objectiveFunction->getNumberOfVariables();
01355 
01356    // Initial argument
01357 
01358    std::cout << std::endl
01359              << "Initial argument:" << std::endl;
01360 
01361    for(int i = 0; i < numberOfVariables; i++)
01362    {
01363       std::cout << initialArgument[i] << " ";        
01364    }
01365    
01366    std::cout << std::endl;
01367 
01368    // Optimization operators
01369 
01370    std::cout << std::endl
01371              << "Optimization operators:" << std::endl;
01372 
01373    // Optimal step size method
01374 
01375    std::cout << "Optimal step size method:" << std::endl;
01376 
01377    switch(optimalStepSizeMethod)
01378    {
01379       case GoldenSection:
01380 
01381          std::cout << "Golden section" << std::endl;
01382 
01383       break;
01384 
01385       case BrentMethod:
01386 
01387          std::cout << "Brent Method" << std::endl;
01388 
01389       break;
01390    }
01391 
01392 
01393    // Optimization parameters
01394 
01395    std::cout << std::endl
01396              << "Optimization parameters: " << std::endl
01397              << "First step size: " << std::endl
01398              << firstStepSize << std::endl
01399              << "Optimal step size tolerance: " << std::endl
01400              << optimalStepSizeTolerance << std::endl;
01401 
01402    // Stopping criteria
01403 
01404    std::cout << std::endl
01405              << "Stopping criteria: " << std::endl
01406              << "Evaluation goal: " << std::endl
01407              << evaluationGoal << std::endl
01408              << "Gradient norm goal" << std::endl 
01409              << gradientNormGoal <<std::endl
01410              << "Maximum time: " << std::endl
01411              << maximumTime << std::endl
01412              << "Maximum number of iterations: " << std::endl
01413              << maximumNumberOfIterations << std::endl;
01414 
01415    // User stuff
01416 
01417    std::cout << std::endl
01418              << "User stuff: " << std::endl
01419              << "Warning step size: " << std::endl
01420              << warningStepSize << std::endl
01421              << "Show period: " << std::endl
01422              << showPeriod
01423              << std::endl;
01424 }
01425 
01426 
01427 // void save(char*) method
01428 
01429 /// This method saves the gradient descent object to a data file. 
01430 ///
01431 /// Initial argument.
01432 ///
01433 /// Optimization operators:
01434 /// <ul>
01435 /// <li> Optimal step size method.
01436 /// </ul>
01437 ///
01438 /// Optimization parameters:
01439 /// <ul>
01440 /// <li> First step size.
01441 /// <li> Optimal step size tolerance.
01442 /// </ul>
01443 ///
01444 /// Stopping criteria:
01445 /// <ul> 
01446 /// <li> Evaluation goal.
01447 /// <li> Gradient norm goal.
01448 /// <li> Maximum time.
01449 /// <li> Maximum number of iterations. 
01450 /// </ul> 
01451 ///  
01452 /// User stuff: 
01453 /// <ul>
01454 /// <li> Warning step size.
01455 /// <li> Show period. 
01456 /// </ul>
01457 ///
01458 /// @param filename: Filename.
01459 ///
01460 /// @see load(char*).
01461 
01462 void GradientDescent::save(char* filename)
01463 {
01464    // File
01465 
01466    std::fstream file;
01467 
01468    file.open(filename, std::ios::out);
01469 
01470    if(!file.is_open())
01471    {
01472       std::cout << std::endl 
01473                 << "Error: GradientDescent class. void save(char*) method."
01474                 << std::endl
01475                 << "Cannot open gradient descent object data file."  << std::endl
01476                 << std::endl;
01477 
01478       exit(1);
01479    }
01480    else
01481    {
01482       std::cout << std::endl
01483                 << "Saving gradient descent object to data file..." << std::endl;
01484    }
01485 
01486    // Write file header
01487 
01488    file << "% Purple: An Open Source Numerical Optimization C++ Library." 
01489         << std::endl 
01490         << "% Gradient Descent Object." << std::endl; 
01491 
01492    int numberOfVariables = objectiveFunction->getNumberOfVariables();
01493  
01494    // Initial argument
01495 
01496    file << "InitialArgument:" << std::endl;
01497 
01498    for(int i = 0; i < numberOfVariables; i++)
01499    {
01500       file << initialArgument[i] << " ";        
01501    }
01502    
01503    file << std::endl;
01504 
01505    // Optimization operators
01506 
01507    // Optimal step size method
01508 
01509    file << "OptimalStepSizeMethod:" << std::endl;
01510 
01511    switch(optimalStepSizeMethod)
01512    {
01513       case GoldenSection:
01514 
01515       file << "GoldenSection" << std::endl;
01516 
01517       break;
01518 
01519       case BrentMethod:
01520 
01521       file << "BrentMethod" << std::endl;
01522 
01523       break;
01524    }
01525 
01526    // Optimization parameters
01527 
01528    file << "FirstStepSize:" << std::endl
01529         << firstStepSize << std::endl
01530         << "OptimalStepSizeTolerance:" << std::endl
01531         << optimalStepSizeTolerance << std::endl;
01532 
01533    // Stopping criteria
01534 
01535    file << "EvaluationGoal:" << std::endl
01536         << evaluationGoal << std::endl
01537         << "GradientNormGoal:" << std::endl
01538         << gradientNormGoal << std::endl
01539         << "MaximumTime: " << std::endl
01540         << maximumTime << std::endl
01541         << "MaximumNumberOfIterations: " << std::endl
01542         << maximumNumberOfIterations << std::endl;
01543 
01544    // User stuff
01545 
01546    file << "WarningStepSize: " << std::endl
01547         << warningStepSize << std::endl
01548         << "ShowPeriod: " << std::endl
01549         << showPeriod << std::endl;
01550 
01551    file.close();
01552 }
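
// For illustration, a data file written by save(char*) for a two-variable
// problem looks like this (the numeric values shown are arbitrary):
//
//    % Purple: An Open Source Numerical Optimization C++ Library.
//    % Gradient Descent Object.
//    InitialArgument:
//    -1.2 1.0
//    OptimalStepSizeMethod:
//    BrentMethod
//    FirstStepSize:
//    0.001
//    OptimalStepSizeTolerance:
//    0.001
//    EvaluationGoal:
//    -1e+69
//    GradientNormGoal:
//    0
//    MaximumTime:
//    1e+06
//    MaximumNumberOfIterations:
//    1000
//    WarningStepSize:
//    1000
//    ShowPeriod:
//    25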
01553 
01554 
01555 // void load(char*) method
01556 
01557 /// This method loads a gradient descent object from a data file. 
01558 /// Please mind the file format, which is specified in the User's Guide. 
01559 ///
01560 ///
01561 /// Initial argument.
01562 ///
01563 /// Optimization operators:
01564 /// <ul>
01565 /// <li> Optimal step size method.
01566 /// </ul>
01567 ///
01568 /// Optimization parameters:
01569 /// <ul>
01570 /// <li> First step size.
01571 /// <li> Optimal step size tolerance.
01572 /// </ul>
01573 ///
01574 /// Stopping criteria:
01575 /// <ul> 
01576 /// <li> Evaluation goal.
01577 /// <li> Gradient norm goal.
01578 /// <li> Maximum time.
01579 /// <li> Maximum number of iterations. 
01580 /// </ul> 
01581 ///  
01582 /// User stuff: 
01583 /// <ul>
01584 /// <li> Warning step size.
01585 /// <li> Show period. 
01586 /// </ul>
01587 ///
01588 /// @param filename: Filename.
01589 ///
01590 /// @see save(char*).
01591 
01592 void GradientDescent::load(char* filename)
01593 {
01594    // File
01595 
01596    std::fstream file;
01597 
01598    file.open(filename, std::ios::in);
01599 
01600    if(!file.is_open())
01601    {
01602       std::cout << std::endl
01603                 << "Error: GradientDescent class." << std::endl
01604                 << "void load(char*) method."
01605                 << std::endl
01606                 << "Cannot open gradient descent object data file."  << std::endl;
01607 
01608       exit(1);
01609    }
01610    else
01611    {
01612       std::cout << std::endl
01613                 << "Loading gradient descent object from data file..."
01614                 << std::endl;
01615    }
01616 
01617    std::string word;
01618 
01619    // Initial argument
01620 
01621    while(word != "InitialArgument:")
01622    {
01623       file >> word;
01624    }
01625 
01626    int numberOfVariables = objectiveFunction->getNumberOfVariables();
01627 
01628    for(int i = 0; i < numberOfVariables; i++)
01629    {
01630       file >> initialArgument[i];        
01631    }
01632 
01633    // Optimization operators
01634 
01635    // Optimal step size method
01636 
01637    file >> word;
01638 
01639    file >> word;
01640 
01641    if(word == "GoldenSection")
01642    {
01643       optimalStepSizeMethod = GoldenSection;
01644    }
01645    else if(word == "BrentMethod")
01646    {
01647       optimalStepSizeMethod = BrentMethod;
01648    }
01649 
01650    // Optimization parameters
01651 
01652    // First step size
01653 
01654    file >> word;
01655 
01656    file >> firstStepSize;
01657 
01658    // Tolerance
01659 
01660    file >> word;
01661 
01662    file >> optimalStepSizeTolerance;
01663 
01664    // Stopping criteria: 
01665 
01666    // Evaluation goal
01667 
01668    file >> word;
01669 
01670    file >> evaluationGoal;
01671 
01672    // Norm of objective function gradient goal
01673 
01674    file >> word;
01675 
01676    file >> gradientNormGoal;
01677 
01678    // Maximum time
01679 
01680    file >> word;
01681 
01682    file >> maximumTime;
01683 
01684    // Maximum number of iterations
01685 
01686    file >> word;
01687 
01688    file >> maximumNumberOfIterations;
01689 
01690    // User stuff: 
01691 
01692    // Warning step size
01693 
01694    file >> word;
01695 
01696    file >> warningStepSize;
01697 
01698    // Iterations between showing progress
01699 
01700    file >> word;
01701 
01702    file >> showPeriod;
01703 
01704    // Close file
01705 
01706    file.close();
01707 }
01708 
01709 
01710 // void saveOptimizationHistory(char*) method 
01711 
01712 /// This method saves the optimization history to a data file:
01713 ///
01714 /// <ol>
01715 /// <li> Iteration.
01716 /// <li> Objective function evaluation.
01717 /// <li> Objective function gradient norm.
01718 /// </ol>
01719 ///
01720 /// @param filename: Filename.
01721 
01722 void GradientDescent::saveOptimizationHistory(char* filename)
01723 {
01724    std::fstream file; 
01725 
01726    file.open(filename, std::ios::out);
01727 
01728    // Write file header 
01729 
01730    if(!file.is_open())
01731    {
01732       std::cout << std::endl 
01733                 << "Error: GradientDescent class. " << std::endl
01734                 << "void saveOptimizationHistory(char*) method."
01735                 << std::endl
01736                 << "Cannot open optimization history data file." << std::endl
01737                 << std::endl;
01738 
01739       exit(1);
01740    }
01741    else
01742    {
01743       std::cout << std::endl 
01744                 << "Saving optimization history to data file..." << std::endl;
01745    }
01746 
01747    // Write file header
01748 
01749    file << "% Purple: An Open Source Numerical Optimization C++ Library." 
01750         << std::endl 
01751         << "% Gradient Descent Optimization History." << std::endl
01752         << "% 1 - Iteration." << std::endl
01753         << "% 2 - Objective function evaluation." << std::endl
01754         << "% 3 - Objective function gradient norm." << std::endl;
01755 
01756    // Write file data
01757 
01758    int size = evaluationHistory.getSize();
01759 
01760    for (int i = 0; i < size; i++)
01761    {
01762       file << i << " "
01763            << evaluationHistory[i] << " "
01764            << gradientNormHistory[i] << std::endl;
01765    }
01766 
01767    file << std::endl;
01768 
01769    file.close();
01770 }
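
// For illustration, the first rows of an optimization history data file
// written by saveOptimizationHistory(char*) look like this (the evaluation
// and gradient norm values shown are arbitrary):
//
//    % Purple: An Open Source Numerical Optimization C++ Library.
//    % Gradient Descent Optimization History.
//    % 1 - Iteration.
//    % 2 - Objective function evaluation.
//    % 3 - Objective function gradient norm.
//    0 24.2 18.6
//    1 4.41 7.93
//    2 0.972 2.10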
01771 
01772 // double getMinimum(Vector<double>) method
01773 
01774 /// This method returns the minimum element in a Vector of double precision numbers.
01775 ///
01776 /// @param v: Vector of double precision numbers.
01777 ///
01778 /// @see getGoldenSectionOptimalStepSize(double, double, Vector<double>, Vector<double>)
01779 
01780 double GradientDescent::getMinimum(Vector<double> v)
01781 {
01782    int n = v.getSize();
01783 
01784    double minimum = v[0];
01785 
01786    for(int i = 1; i < n; i++)
01787    {
01788       if(v[i] < minimum)
01789       {
01790          minimum = v[i];
01791       }
01792    }
01793 
01794    return(minimum);
01795 }
01796 
01797 
01798 // double getMaximum(Vector<double>) method
01799 
01800 /// This method returns the maximum element in a Vector of double precision numbers.
01801 ///
01802 /// @param v: Vector of double precision numbers.
01803 ///
01804 /// @see getGoldenSectionOptimalStepSize(double, double, Vector<double>, Vector<double>)
01805 
01806 double GradientDescent::getMaximum(Vector<double> v)
01807 {
01808    int n = v.getSize();
01809 
01810    double maximum = v[0];
01811 
01812    for(int i = 1; i < n; i++)
01813    {
01814       if(v[i] > maximum)
01815       {
01816          maximum = v[i];
01817       }
01818    }
01819 
01820    return(maximum);
01821 }
01822 
01823 }
01824 
01825 
01826 // Purple: An Open Source Numerical Optimization C++ Library.
01827 // Copyright (C) 2005 Roberto Lopez 
01828 //
01829 // This library is free software; you can redistribute it and/or
01830 // modify it under the terms of the GNU Lesser General Public
01831 // License as published by the Free Software Foundation; either
01832 // version 2.1 of the License, or any later version.
01833 //
01834 // This library is distributed in the hope that it will be useful,
01835 // but WITHOUT ANY WARRANTY; without even the implied warranty of
01836 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
01837 // Lesser General Public License for more details.
01838 
01839 // You should have received a copy of the GNU Lesser General Public
01840 // License along with this library; if not, write to the Free Software
01841 // Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
