
gnn_gradient_descent.c

00001 /***************************************************************************
00002  *  @file gnn_gradient_descent.c
00003  *  @brief Gradient Descent Trainer Implementation.
00004  *
00005  *  @date   : 31-08-03 21:32
00006  *  @author : Pedro Ortega C. <peortega@dcc.uchile.cl>
00007  *  Copyright  2003  Pedro Ortega C.
00008  ****************************************************************************/
00009 /*
00010  *  This program is free software; you can redistribute it and/or modify
00011  *  it under the terms of the GNU General Public License as published by
00012  *  the Free Software Foundation; either version 2 of the License, or
00013  *  (at your option) any later version.
00014  *
00015  *  This program is distributed in the hope that it will be useful,
00016  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
00017  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
00018  *  GNU General Public License for more details.
00019  *
00020  *  You should have received a copy of the GNU General Public License
00021  *  along with this program; if not, write to the Free Software
00022  *  Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
00023  */
00024 
00025 
00026 
00027 /**
00028  * @defgroup gnn_gradient_descent_doc gnn_gradient_descent : Gradient Descent Algorithm.
00029  * @ingroup gnn_trainer_doc
00030  *
00031  * The present trainer provides an implementation of the gradient descent
00032  * algorithm for parameter optimization. Gradient descent is the simplest
00033  * parameter optimization procedure based on the gradient. At each step,
00034  * the parameters are updated using the following rule:
00035  * \f[ \Delta w \leftarrow - \mu \frac{\partial E}{\partial w} \f]
00036  * where \f$ \mu \f$ is called the "learning rate", and determines the
00037  * step size taken. The value of the learning rate depends on the
00038  * problem, but typical values lie within \f$[0.0001, 0.1]\f$. Smaller values
00039  * tend to get trapped in local minima, while larger values often overshoot
00040  * the optimum.
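 *
 * As a minimal standalone sketch of this rule (the helper below is purely
 * illustrative and not part of libgnn; it uses GSL directly rather than
 * the trainer interface), a single update step amounts to:
 * \code
 * #include <gsl/gsl_vector.h>
 *
 * // One gradient descent step: w <- w - mu * dE/dw.
 * // dw holds the gradient dE/dw and is overwritten by the scaling.
 * static void
 * gradient_descent_step (gsl_vector *w, gsl_vector *dw, double mu)
 * {
 *     gsl_vector_scale (dw, -mu);   // dw <- -mu * dE/dw
 *     gsl_vector_add (w, dw);       // w  <- w + dw
 * }
 * \endcode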
00041  */
00042 
00043 
00044 
00045 /******************************************/
00046 /* Include Files                          */
00047 /******************************************/
00048 
00049 #include "gnn_gradient_descent.h"
00050 
00051 
00052 
00053 /******************************************/
00054 /* Static Declaration                     */
00055 /******************************************/
00056 
00057 typedef struct _gnn_gradient_descent gnn_gradient_descent;
00058 
00059 struct _gnn_gradient_descent
00060 {
00061     gnn_trainer trainer;   /* base trainer; must be the first field so casts work */
00062     gsl_vector *w;         /* current parameter vector of the node                */
00063     double mu;             /* learning rate                                       */
00064 };
00065 
00066 
00067 
00068 static int
00069 gnn_gradient_descent_train (gnn_trainer *trainer);
00070 
00071 static void
00072 gnn_gradient_descent_destroy (gnn_trainer *trainer);
00073 
00074 
00075 
00076 /******************************************/
00077 /* Static Implementation                  */
00078 /******************************************/
00079 
00080 /**
00081  * @brief The trainer's "reset" implementation.
00082  * @ingroup gnn_gradient_descent_doc
00083  *
00084  * @param  trainer A pointer to a \ref gnn_gradient_descent.
00085  * @return Returns 0 on success.
00086  */
00087 static int
00088 gnn_gradient_descent_reset (gnn_trainer *trainer)
00089 {
00090     gnn_gradient_descent *gdtrainer;
00091     
00092     assert (trainer != NULL);
00093     
00094     gdtrainer = (gnn_gradient_descent *) trainer;
00095     
00096     /* get parameters */
00097     gnn_node_param_get (trainer->node, gdtrainer->w);
00098     
00099     return 0;
00100 }
00101 
00102 /**
00103  * @brief The trainer's "train" implementation.
00104  * @ingroup gnn_gradient_descent_doc
00105  *
00106  * @param  trainer A pointer to a \ref gnn_gradient_descent.
00107  * @return Returns 0 on success.
00108  */
00109 static int
00110 gnn_gradient_descent_train (gnn_trainer *trainer)
00111 {
00112     gsl_vector *dw;
00113     gnn_gradient_descent *gdtrainer;
00114     
00115     /* get view */
00116     gdtrainer = (gnn_gradient_descent *) trainer;
00117     
00118     /* process minibatch */
00119     gnn_trainer_batch_process (trainer);
00120     
00121     /* move to next minibatch */
00122     gnn_trainer_batch_next (trainer);
00123 
00124     /* get gradient */
00125     dw = gnn_trainer_batch_get_dw (trainer);
00126     
00127     /* scale with learning factor */
00128     gsl_vector_scale (dw, - gdtrainer->mu);
00129     
00130     /* sum to parameters */
00131     gsl_vector_add (gdtrainer->w, dw);
00132     
00133     /* update parameters */
00134     gnn_node_param_set (trainer->node, gdtrainer->w);
00135     
00136     return 0;
00137 }
00138 
00139 /**
00140  * @brief The trainer's "destroy" implementation.
00141  * @ingroup gnn_gradient_descent_doc
00142  *
00143  * @param trainer A pointer to a \ref gnn_gradient_descent.
00144  */
00145 static void
00146 gnn_gradient_descent_destroy (gnn_trainer *trainer)
00147 {
00148     gnn_gradient_descent *gdtrainer;
00149     
00150     assert (trainer != NULL);
00151 
00152     gdtrainer = (gnn_gradient_descent *) trainer;
00153     
00154     gsl_vector_free (gdtrainer->w);
00155     
00156     return;
00157 }
00158 
00159 
00160 
00161 /******************************************/
00162 /* Public Interface                       */
00163 /******************************************/
00164 
00165 /**
00166  * @brief Creates a new gradient descent trainer.
00167  * @ingroup gnn_gradient_descent_doc
00168  *
00169  * This function creates a new gradient descent trainer
00170  * (\ref gnn_gradient_descent). It uses the learning rate given by "mu".
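 *
 * A typical call might look as follows (a sketch only; "node", "crit" and
 * "data" are assumed to have been created beforehand by the corresponding
 * libgnn constructors, which are not shown here):
 * \code
 * gnn_trainer *trainer;
 *
 * // Train "node" against criterion "crit" on dataset "data"
 * // with a learning rate of 0.01.
 * trainer = gnn_gradient_descent_new (node, crit, data, 0.01);
 * if (trainer == NULL)
 * {
 *     // Creation failed: invalid learning rate or out of memory.
 * }
 * \endcode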
00171  *
00172  * @param  node A pointer to a \ref gnn_node.
00173  * @param  crit A pointer to a \ref gnn_criterion.
00174  * @param  data A pointer to a \ref gnn_dataset.
00175  * @param  mu   The learning rate \f$\mu\f$.
00176  * @return Returns a pointer to a new \ref gnn_gradient_descent trainer.
00177  */
00178 gnn_trainer *
00179 gnn_gradient_descent_new (gnn_node *node,
00180                           gnn_criterion *crit,
00181                           gnn_dataset *data,
00182                           double mu)
00183 {
00184     int status;
00185     gnn_trainer *trainer;
00186     gnn_gradient_descent *gdtrainer;
00187     
00188     /* check that mu is strictly positive */
00189     if (mu <= 0.0)
00190     {
00191         GSL_ERROR_VAL ("learning factor should be strictly positive",
00192                        GSL_EINVAL, NULL);
00193     }
00194     
00195     /* allocate memory for the trainer */
00196     gdtrainer = (gnn_gradient_descent *) malloc (sizeof (gnn_gradient_descent));
00197     if (gdtrainer == NULL)
00198     {
00199         GSL_ERROR_VAL ("couldn't allocate memory for gnn_gradient_descent",
00200                        GSL_ENOMEM, NULL);
00201     }
00202     
00203     /* get view as gnn_trainer */
00204     trainer = (gnn_trainer *) gdtrainer;
00205     
00206     /* initialize */
00207     status = gnn_trainer_init (trainer,
00208                                "gnn_gradient_descent",
00209                                node,
00210                                crit,
00211                                data,
00212                                gnn_gradient_descent_reset,
00213                                gnn_gradient_descent_train,
00214                                gnn_gradient_descent_destroy);
00215     if (status)
00216     {
00217         GSL_ERROR_VAL ("couldn't initialize gnn_gradient_descent",
00218                        GSL_EFAILED, NULL);
00219     }
00220     
00221     /* set fields */
00222     gdtrainer->mu = mu;
00223     gdtrainer->w  = gsl_vector_alloc (gnn_node_param_get_size (node));
00224     if (gdtrainer->w == NULL)
00225     {
00226         GSL_ERROR_VAL ("couldn't allocate memory for gnn_gradient_descent",
00227                        GSL_ENOMEM, NULL);
00228     }
00229 
00230     return trainer;
00231 }
00232 
00233 /**
00234  * @brief Gets the learning rate.
00235  * @ingroup gnn_gradient_descent_doc
00236  *
00237  * This function returns the learning rate \f$\mu\f$ used by the gradient
00238  * descent trainer.
00239  *
00240  * @param  trainer A pointer to a \ref gnn_gradient_descent.
00241  * @return Returns the learning rate \f$\mu\f$.
00242  */
00243 double
00244 gnn_gradient_descent_get_mu (gnn_trainer *trainer)
00245 {
00246     gnn_gradient_descent *gdtrainer;
00247 
00248     assert (trainer != NULL);
00249 
00250     gdtrainer = (gnn_gradient_descent *) trainer;
00251     return gdtrainer->mu;
00252 }
00253 
00254 /**
00255  * @brief Sets the learning rate.
00256  * @ingroup gnn_gradient_descent_doc
00257  *
00258  * This function sets a new value for the learning rate \f$\mu\f$ used by
00259  * the gradient descent trainer. The learning rate should be strictly positive.
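 *
 * For example, the learning rate could be decayed during training
 * (a sketch; the surrounding training loop is assumed):
 * \code
 * // Halve the current learning rate.
 * double mu = gnn_gradient_descent_get_mu (trainer);
 * gnn_gradient_descent_set_mu (trainer, 0.5 * mu);
 * \endcode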
00260  *
00261  * @param  trainer A pointer to a \ref gnn_gradient_descent.
00262  * @param  mu      The learning rate \f$\mu\f$.
00263  * @return Returns 0 on success.
00264  */
00265 int
00266 gnn_gradient_descent_set_mu (gnn_trainer *trainer, double mu)
00267 {
00268     gnn_gradient_descent *gdtrainer;
00269 
00270     assert (trainer != NULL);
00271     
00272     /* check learning factor */
00273     if (mu <= 0.0)
00274         GSL_ERROR ("learning factor should be strictly positive", GSL_EINVAL);
00275 
00276     /* get view */
00277     gdtrainer = (gnn_gradient_descent *) trainer;
00278 
00279     /* set new learning factor */
00280     gdtrainer->mu = mu;
00281     
00282     return 0;
00283 }
00284 
00285 
00286 
