Main Page   Modules   Data Structures   File List   Data Fields   Globals   Related Pages  

gnn_rprop.c

Go to the documentation of this file.
00001 /***************************************************************************
00002  *  @file gnn_rprop.c
00003  *  @brief Resilient Backpropagation Trainer Implementation.
00004  *
00005  *  @date   : 18-09-03 20:36
00006  *  @author : Pedro Ortega C. <peortega@dcc.uchile.cl>
00007  *  Copyright  2003  Pedro Ortega C.
00008  ****************************************************************************/
00009 /*
00010  *  This program is free software; you can redistribute it and/or modify
00011  *  it under the terms of the GNU General Public License as published by
00012  *  the Free Software Foundation; either version 2 of the License, or
00013  *  (at your option) any later version.
00014  *
00015  *  This program is distributed in the hope that it will be useful,
00016  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
00017  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
00018  *  GNU Library General Public License for more details.
00019  *
00020  *  You should have received a copy of the GNU General Public License
00021  *  along with this program; if not, write to the Free Software
00022  *  Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
00023  */
00024 
00025 
00026 
00027 /**
00028  * @brief Resilient Backpropagation.
00029  * @defgroup gnn_rprop_doc gnn_rprop : Resilient Error Backpropagation Trainer.
00030  * @ingroup gnn_trainer_doc
00031  *
00032  * The present trainer provides an implementation of the Resilient
00033  * Error-Backpropagation Algorithm.
00034  *
00035  * The weights are updated using the following rule:
00036  * \f[ \Delta w_k^{(t)} =
00037  *         \left \{
00038  *         \begin{array}{ccl}
00039  *         -\Delta_k^{(t)} &,&
00040  *             \textrm{if } \frac{\partial E}{\partial w_k}^{(t)} > 0\\
00041  *         +\Delta_k^{(t)} &,&
00042  *             \textrm{if } \frac{\partial E}{\partial w_k}^{(t)} < 0\\
00043  *         0 &,& \textrm{else}\\
00044  *         \end{array}
00045  *         \right .
00046  * \f]
00047  * where \f$\Delta_k^{(t)}\f$ is the 'update-value' for the parameter
00048  * \f$w_k\f$ at time \f$t\f$. These are computed by the following rule:
00049  * \f[ \Delta_k^{(t)} =
00050  *         \left \{
00051  *         \begin{array}{ccl}
00052  *         \nu^+ \cdot \Delta_k^{(t-1)} &,& \textrm{if }
00053  *             \frac{\partial E}{\partial w_k}^{(t-1)}
00054  *             \cdot \frac{\partial E}{\partial w_k}^{(t)} > 0 \\
00055  *         \nu^- \cdot \Delta_k^{(t-1)} &,& \textrm{if }
00056  *             \frac{\partial E}{\partial w_k}^{(t-1)}
00057  *             \cdot \frac{\partial E}{\partial w_k}^{(t)} < 0 \\
00058  *         \Delta_k^{(t-1)} &,& \textrm{else}\\
00059  *         \end{array}
00060  *         \right .
00061  * \f]
00062  * where \f$0 < \nu^- < 1 < \nu^+\f$.
00063  *
00064  * References:
00065  * [M. Riedmiller and H. Braun. "A direct adaptive method for faster
00066  *  backpropagation learning: The RPROP algorithm." In H. Ruspini, editor,
00067  *  Proceedings of the IEEE International Conference on Neural Networks (ICNN),
00068  *  pages 586 - 591, San Francisco, 1993.]
00069  */
00070 
00071 
00072 
00073 /******************************************/
00074 /* Include Files                          */
00075 /******************************************/
00076 
00077 #include "gnn_utilities.h"
00078 #include "gnn_rprop.h"
00079 
00080 
00081 
00082 /******************************************/
00083 /* Static Declaration                     */
00084 /******************************************/
00085 
00086 typedef struct _gnn_rprop gnn_rprop;
00087 
00088 struct _gnn_rprop
00089 {
00090     gnn_trainer trainer;
00091     gsl_vector *w;
00092     gsl_vector *dwnew;
00093     gsl_vector *dwold;
00094     gsl_vector *deltaw;
00095     gsl_vector *delta;
00096     double nup;
00097     double num;
00098     double deltamax;
00099     double deltamin;
00100     double delta0;
00101 };
00102 
00103 
00104 static int
00105 gnn_rprop_train (gnn_trainer   *trainer);
00106 
00107 static void
00108 gnn_rprop_destroy (gnn_trainer *trainer);
00109 
00110 
00111 
00112 /******************************************/
00113 /* Static Implementation                  */
00114 /******************************************/
00115 
00116 /**
00117  * @brief Reset function.
00118  * @ingroup gnn_rprop_doc
00119  *
00120  * @param  trainer A pointer to a \ref gnn_rprop.
00121  * @return Returns 0 if succeeded.
00122  */
00123 static int
00124 gnn_rprop_reset (gnn_trainer *trainer)
00125 {
00126     gnn_rprop *rtrainer;
00127 
00128     assert (trainer != NULL);
00129 
00130     rtrainer = (gnn_rprop *) trainer;
00131 
00132     /* get parameters */
00133     gnn_node_param_get (trainer->node, rtrainer->w);
00134 
00135     /* reset deltas */
00136     gsl_vector_set_all (rtrainer->delta, rtrainer->delta0);
00137     gsl_vector_set_zero (rtrainer->dwold);
00138 
00139     return 0;
00140 }
00141 
00142 /**
00143  * @brief Train function.
00144  * @ingroup gnn_rprop_doc
00145  *
00146  * @param  trainer A pointer to a \ref gnn_rprop.
00147  * @return Returns 0 if succeeded.
00148  */
00149 static int
00150 gnn_rprop_train (gnn_trainer *trainer)
00151 {
00152     size_t k;
00153     size_t param_size;
00154     gnn_rprop *rtrainer;
00155 
00156     /* get view */
00157     rtrainer = (gnn_rprop *) trainer;
00158 
00159     /* process minibatch */
00160     gnn_trainer_batch_process (trainer);
00161 
00162     /* get a copy of the gradient */
00163     gsl_vector_memcpy (rtrainer->dwnew, gnn_trainer_batch_get_dw (trainer));
00164 
00165     /* move to next minibatch */
00166     gnn_trainer_batch_next (trainer);
00167 
00168     /* get parameter size */
00169     param_size = gnn_node_param_get_size (gnn_trainer_get_node (trainer));
00170 
00171     /* compute steps */
00172     for (k=0; k<param_size; ++k)
00173     {
00174         double dwnewk;
00175         double dwoldk;
00176         double deltak;
00177         double deltawk;
00178         double wk;
00179 
00180         dwnewk  = gsl_vector_get (rtrainer->dwnew, k);
00181         dwoldk  = gsl_vector_get (rtrainer->dwold, k);
00182         deltak  = gsl_vector_get (rtrainer->delta, k);
00183         wk      = gsl_vector_get (rtrainer->w, k);
00184         
00185         if (dwnewk * dwoldk > 0)
00186         {
00187             deltak  = GNN_MIN (rtrainer->nup * deltak, rtrainer->deltamax);
00188             deltawk = - GNN_SIGN (dwnewk) * deltak;
00189             wk = wk + deltawk;
00190             dwoldk = dwnewk;
00191         }
00192         else if (dwnewk * dwoldk < 0)
00193         {
00194             deltak = GNN_MAX (rtrainer->num * deltak, rtrainer->deltamin);
00195             dwoldk = 0;
00196         }
00197         else
00198         {
00199             deltawk = - GNN_SIGN (dwnewk) * deltak;
00200             wk = wk + deltawk;
00201             dwoldk = dwnewk;
00202         }
00203         
00204         gsl_vector_set (rtrainer->dwold, k, dwoldk);
00205         gsl_vector_set (rtrainer->deltaw, k, deltawk);
00206         gsl_vector_set (rtrainer->delta, k, deltak);
00207         gsl_vector_set (rtrainer->w, k, wk);
00208     }
00209 
00210     /* update parameters */
00211     gnn_node_param_set (trainer->node, rtrainer->w);
00212 
00213     return 0;
00214 }
00215 
00216 /**
00217  * @brief Destructor.
00218  * @ingroup gnn_rprop_doc
00219  *
00220  * @param  trainer A pointer to a \ref gnn_momentum.
00221  */
00222 static void
00223 gnn_rprop_destroy (gnn_trainer *trainer)
00224 {
00225     gnn_rprop *rtrainer;
00226 
00227     assert (trainer != NULL);
00228 
00229     rtrainer = (gnn_rprop *) trainer;
00230 
00231     if (rtrainer->w != NULL)
00232         gsl_vector_free (rtrainer->w);
00233     if (rtrainer->dwnew != NULL)
00234         gsl_vector_free (rtrainer->dwnew);
00235     if (rtrainer->dwold != NULL)
00236         gsl_vector_free (rtrainer->dwold);
00237     if (rtrainer->delta != NULL)
00238         gsl_vector_free (rtrainer->delta);
00239 
00240     return;
00241 }
00242 
00243 
00244 
00245 /******************************************/
00246 /* Public Interface                       */
00247 /******************************************/
00248 
00249 /**
00250  * @brief Creates a new RPROP trainer.
00251  * @ingroup gnn_rprop_doc
00252  *
00253  * This function creates a RPROP trainer (\ref gnn_rprop). \f$\delta_0\f$
00254  * is the initial learning rate, and \f$\delta_{max}\f$ is the maximum
00255  * learning rate.
00256  *
00257  * @param  node A pointer to a \ref gnn_node.
00258  * @param  crit A pointer to a \ref gnn_criterion.
00259  * @param  data A pointer to a \ref gnn_dataset.
00260  * @param  delta0   Initial learning rate \f$\delta_0\f$.
00261  * @param  deltamax Maximum learnig rate \f$\delta_{max}\f$.
00262  * @return Returns a pointer to a new \ref gnn_rprop trainer.
00263  */
00264 gnn_trainer *
00265 gnn_rprop_new (gnn_node *node,
00266                gnn_criterion *crit,
00267                gnn_dataset *data,
00268                double delta0,
00269                double deltamax)
00270 {
00271     int status;
00272     size_t param_size;
00273     gnn_trainer *trainer;
00274     gnn_rprop *rtrainer;
00275 
00276     /* check that delta0 isn't negative */
00277     if (delta0 <= 0.0)
00278     {
00279         GSL_ERROR_VAL ("delta0 should be stricly positive",
00280                        GSL_EINVAL, NULL);
00281     }
00282     /* check that deltamax isn't negative */
00283     if (deltamax <= 0.0)
00284     {
00285         GSL_ERROR_VAL ("deltamax should be strictly positive",
00286                        GSL_EINVAL, NULL);
00287     }
00288 
00289     /* allocate memory for the trainer */
00290     rtrainer = (gnn_rprop *) malloc (sizeof (gnn_rprop));
00291     if (rtrainer == NULL)
00292     {
00293         GSL_ERROR_VAL ("couldn't allocate memory for gnn_rprop",
00294                        GSL_ENOMEM, NULL);
00295     }
00296 
00297     /* get view as gnn_trainer */
00298     trainer = (gnn_trainer *) rtrainer;
00299 
00300     /* initialize */
00301     status = gnn_trainer_init (trainer,
00302                                "gnn_rprop",
00303                                node,
00304                                crit,
00305                                data,
00306                                gnn_rprop_reset,
00307                                gnn_rprop_train,
00308                                gnn_rprop_destroy);
00309     if (status)
00310     {
00311         GSL_ERROR_VAL ("couldn't initialize gnn_rprop",
00312                        GSL_EFAILED, NULL);
00313     }
00314 
00315     /* set fields */
00316     param_size = gnn_node_param_get_size (node);
00317 
00318     rtrainer->delta0   = delta0;
00319     rtrainer->deltamin = 0.000001;
00320     rtrainer->deltamax = deltamax;
00321     rtrainer->nup      = 1.2;
00322     rtrainer->num      = 0.5;
00323     rtrainer->w      = gsl_vector_alloc (param_size);
00324     rtrainer->dwold  = gsl_vector_alloc (param_size);
00325     rtrainer->dwnew  = gsl_vector_alloc (param_size);
00326     rtrainer->delta  = gsl_vector_alloc (param_size);
00327     rtrainer->deltaw = gsl_vector_alloc (param_size);
00328     if (   rtrainer->w      == NULL
00329         || rtrainer->dwold  == NULL
00330         || rtrainer->dwnew  == NULL
00331         || rtrainer->delta  == NULL
00332         || rtrainer->deltaw == NULL )
00333     {
00334         gnn_trainer_destroy (trainer);
00335         GSL_ERROR_VAL ("couldn't allocate memory for gnn_rprop",
00336                        GSL_ENOMEM, NULL);
00337     }
00338 
00339     return trainer;
00340 }
00341 
00342 /**
00343  * @brief Creates a standard RPROP trainer.
00344  * @ingroup gnn_rprop_doc
00345  *
00346  * This function creates a RPROP trainer (\ref gnn_rprop). The initial
00347  * learning rate \f$\delta_0\f$ is 0.1, and the maximum learning rate
00348  * \f$\delta_{max}\f$ is 50.
00349  *
00350  * @param  node A pointer to a \ref gnn_node.
00351  * @param  crit A pointer to a \ref gnn_criterion.
00352  * @param  data A pointer to a \ref gnn_dataset.
00353  * @return Returns a pointer to a new \ref gnn_rprop trainer.
00354  */
00355 gnn_trainer *
00356 gnn_rprop_standard_new (gnn_node *node,
00357                         gnn_criterion *crit,
00358                         gnn_dataset *data)
00359 {
00360     return gnn_rprop_new (node, crit, data, 0.1, 50.0);
00361 }
00362 
00363 
00364 /**
00365  * @brief Sets \f$\delta_0\f$.
00366  * @ingroup gnn_rprop_doc
00367  *
00368  * This function sets a new value for \f$\delta_0\f$, the
00369  * initial learning rate.
00370  *
00371  * Its value should be stricly positive.
00372  *
00373  * @param  trainer A pointer to a \ref gnn_rprop.
00374  * @param  delta0  The new value for \f$\delta_0\f$.
00375  * @return Returns 0 if suceeded.
00376  */
00377 int
00378 gnn_rprop_set_delta0 (gnn_trainer *trainer, double delta0)
00379 {
00380     gnn_rprop *rtrainer;
00381 
00382     assert (trainer != NULL);
00383 
00384     /* check value */
00385     if (delta0 <= 0.0)
00386         GSL_ERROR ("delta0 should be strictly positive", GSL_EINVAL);
00387 
00388     /* get view */
00389     rtrainer = (gnn_rprop *) trainer;
00390 
00391     /* set new delta0 */
00392     rtrainer->delta0 = delta0;
00393 
00394     return 0;
00395 }
00396 
00397 /**
00398  * @brief Gets \f$\delta_0\f$.
00399  * @ingroup gnn_rprop_doc
00400  *
00401  * This function returns \f$\delta_0\f$, the initial learning rate.
00402  *
00403  * @param  trainer A pointer to a \ref gnn_rprop.
00404  * @return Returns \f$\delta_0\f$.
00405  */
00406 double
00407 gnn_rprop_get_delta0 (gnn_trainer *trainer)
00408 {
00409     gnn_rprop *rtrainer;
00410 
00411     assert (trainer != NULL);
00412 
00413     rtrainer = (gnn_rprop *) trainer;
00414     return rtrainer->delta0;
00415 }
00416 
00417 /**
00418  * @brief Sets \f$\delta_{min}\f$.
00419  * @ingroup gnn_rprop_doc
00420  *
00421  * This function sets a new value for \f$\delta_{min}\f$, the
00422  * minimum learning rate.
00423  *
00424  * Its value should be stricly positive.
00425  *
00426  * @param  trainer   A pointer to a \ref gnn_rprop.
00427  * @param  deltamin  The new value for \f$\delta_{min}\f$.
00428  * @return Returns 0 if suceeded.
00429  */
00430 int
00431 gnn_rprop_set_deltamin (gnn_trainer *trainer, double deltamin)
00432 {
00433     gnn_rprop *rtrainer;
00434 
00435     assert (trainer != NULL);
00436 
00437     /* check value */
00438     if (deltamin <= 0.0)
00439         GSL_ERROR ("deltamin should be strictly positive", GSL_EINVAL);
00440 
00441     /* get view */
00442     rtrainer = (gnn_rprop *) trainer;
00443 
00444     /* set new deltamin */
00445     rtrainer->deltamin = deltamin;
00446 
00447     return 0;
00448 }
00449 
00450 /**
00451  * @brief Gets \f$\delta_{min}\f$.
00452  * @ingroup gnn_rprop_doc
00453  *
00454  * This function returns \f$\delta_{min}\f$, the minimum learning rate.
00455  *
00456  * @param  trainer A pointer to a \ref gnn_rprop.
00457  * @return Returns \f$\delta_{min}\f$.
00458  */
00459 double
00460 gnn_rprop_get_deltamin (gnn_trainer *trainer)
00461 {
00462     gnn_rprop *rtrainer;
00463 
00464     assert (trainer != NULL);
00465 
00466     rtrainer = (gnn_rprop *) trainer;
00467     return rtrainer->deltamin;
00468 }
00469 
00470 /**
00471  * @brief Sets \f$\delta_{max}\f$.
00472  * @ingroup gnn_rprop_doc
00473  *
00474  * This function sets a new value for \f$\delta_{max}\f$, the
00475  * maximum learning rate.
00476  *
00477  * Its value should be stricly positive.
00478  *
00479  * @param  trainer  A pointer to a \ref gnn_rprop.
00480  * @param  deltamax The new value for \f$\delta_{max}\f$.
00481  * @return Returns 0 if suceeded.
00482  */
00483 int
00484 gnn_rprop_set_deltamax (gnn_trainer *trainer, double deltamax)
00485 {
00486     gnn_rprop *rtrainer;
00487 
00488     assert (trainer != NULL);
00489 
00490     /* check value */
00491     if (deltamax <= 0.0)
00492         GSL_ERROR ("deltamax should be strictly positive", GSL_EINVAL);
00493 
00494     /* get view */
00495     rtrainer = (gnn_rprop *) trainer;
00496 
00497     /* set new learning factor */
00498     rtrainer->deltamax = deltamax;
00499 
00500     return 0;
00501 }
00502 
00503 /**
00504  * @brief Gets \f$\delta_{max}\f$.
00505  * @ingroup gnn_rprop_doc
00506  *
00507  * This function returns \f$\delta_{max}\f$, the maximum learning rate.
00508  *
00509  * @param  trainer A pointer to a \ref gnn_rprop.
00510  * @return Returns \f$\delta_{max}\f$.
00511  */
00512 double
00513 gnn_rprop_get_deltamax (gnn_trainer *trainer)
00514 {
00515     gnn_rprop *rtrainer;
00516 
00517     assert (trainer != NULL);
00518 
00519     rtrainer = (gnn_rprop *) trainer;
00520     return rtrainer->deltamax;
00521 }
00522 
00523 /**
00524  * @brief Sets \f$\nu^+\f$.
00525  * @ingroup gnn_rprop_doc
00526  *
00527  * This function sets a new value for \f$\nu^+\f$, the
00528  * increment rate.
00529  *
00530  * Its value should be greater than 1.
00531  *
00532  * @param  trainer A pointer to a \ref gnn_rprop.
00533  * @param  nup     The new value for \f$\nu^+\f$.
00534  * @return Returns 0 if suceeded.
00535  */
00536 int
00537 gnn_rprop_set_nup (gnn_trainer *trainer, double nup)
00538 {
00539     gnn_rprop *rtrainer;
00540 
00541     assert (trainer != NULL);
00542 
00543     /* check value */
00544     if (nup <= 1.0)
00545         GSL_ERROR ("nup should be greater than 1", GSL_EINVAL);
00546 
00547     /* get view */
00548     rtrainer = (gnn_rprop *) trainer;
00549 
00550     /* set new learning factor */
00551     rtrainer->nup = nup;
00552 
00553     return 0;
00554 }
00555 
00556 /**
00557  * @brief Gets \f$\nu^+\f$.
00558  * @ingroup gnn_rprop_doc
00559  *
00560  * This function returns \f$\nu^+\f$, the increment rate.
00561  *
00562  * @param  trainer A pointer to a \ref gnn_rprop.
00563  * @return Returns \f$\nu^+f$.
00564  */
00565 double
00566 gnn_rprop_get_nup (gnn_trainer *trainer)
00567 {
00568     gnn_rprop *rtrainer;
00569 
00570     assert (trainer != NULL);
00571 
00572     rtrainer = (gnn_rprop *) trainer;
00573     return rtrainer->nup;
00574 }
00575 
00576 /**
00577  * @brief Sets \f$\nu^-\f$.
00578  * @ingroup gnn_rprop_doc
00579  *
00580  * This function sets a new value for \f$\nu^-\f$, the
00581  * decrement rate.
00582  *
00583  * Its value should be within \f$(0;1)\f$.
00584  *
00585  * @param  trainer A pointer to a \ref gnn_rprop.
00586  * @param  num     The new value for \f$\nu^-\f$.
00587  * @return Returns 0 if suceeded.
00588  */
00589 int
00590 gnn_rprop_set_num (gnn_trainer *trainer, double num)
00591 {
00592     gnn_rprop *rtrainer;
00593 
00594     assert (trainer != NULL);
00595 
00596     /* check value */
00597     if (num <= 0.0 || num >= 1.0)
00598         GSL_ERROR ("num should be within (0,1)", GSL_EINVAL);
00599 
00600     /* get view */
00601     rtrainer = (gnn_rprop *) trainer;
00602 
00603     /* set new learning factor */
00604     rtrainer->num = num;
00605 
00606     return 0;
00607 }
00608 
00609 /**
00610  * @brief Gets \f$\nu^-\f$.
00611  * @ingroup gnn_rprop_doc
00612  *
00613  * This function returns \f$\nu^-\f$, the decrement rate.
00614  *
00615  * @param  trainer A pointer to a \ref gnn_rprop.
00616  * @return Returns \f$\nu^-f$.
00617  */
00618 double
00619 gnn_rprop_get_num (gnn_trainer *trainer)
00620 {
00621     gnn_rprop *rtrainer;
00622 
00623     assert (trainer != NULL);
00624 
00625     rtrainer = (gnn_rprop *) trainer;
00626     return rtrainer->num;
00627 }
00628 

Generated on Sun Jun 13 20:50:12 2004 for libgnn Gradient Retropropagation Machine Library by doxygen1.2.18