controllernet.h

/***************************************************************************
 *   Copyright (C) 2005-2011 LpzRobots development team                    *
 *    Georg Martius  <georg dot martius at web dot de>                     *
 *    Frank Guettler <guettler at informatik dot uni-leipzig dot de>       *
 *    Frank Hesse    <frank at nld dot ds dot mpg dot de>                  *
 *    Ralf Der       <ralfder at mis dot mpg dot de>                       *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.,                                       *
 *   59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.             *
 *                                                                         *
 ***************************************************************************/
#ifndef __CONTROLLERNET_H
#define __CONTROLLERNET_H

#include <vector>

#include "feedforwardnn.h"
#include "layer.h"

/** Multilayer neural network with configurable activation functions
    and propagation and projection methods, suitable for the homeokinesis
    controller.
 */
class ControllerNet : public Configurable {
public:

  /**
     @param layers layer description (the input layer is not specified; it is always linear)
     @param useBypass if true, a connection from the input to the output layer is included
  */
  ControllerNet(const std::vector<Layer>& layers, bool useBypass=false);
  virtual ~ControllerNet(){ }

  /** Initialisation of the network with the given number of input and output units.
      The dimensionality of the output layer is adjusted automatically.
      @param unit_map defines the approximate response of the network
       after initialisation (if unit_map=1 the weights are unit matrices)
      @param rand scaling of the random initialisation of the weights
      @param randGen pointer to a random generator; if 0, a new one is created
   */
  virtual void init(unsigned int inputDim, unsigned int outputDim,
                    double unit_map = 0.0, double rand = 0.2, RandGen* randGen = 0);

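  /* Usage sketch (illustrative addition, not part of the original header).
     It assumes that Layer can be constructed from a size, a bias factor and
     an activation function, and that FeedForwardNN provides tanh, as in
     selforg's layer.h/feedforwardnn.h:

       std::vector<Layer> layers;
       layers.push_back(Layer(10, 0.1, FeedForwardNN::tanh)); // hidden layer
       layers.push_back(Layer(1));     // output layer; its size is adjusted by init()
       ControllerNet net(layers, true); // true: include the input->output bypass
       net.init(4, 2);                  // 4 inputs, 2 outputs
  */
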
  /** Passive processing of the input.
      This has to be done before calling response() and the back/forward
      propagation/projection functions.
      The activations and the response matrix are stored internally.
   */
  virtual const matrix::Matrix process(const matrix::Matrix& input);

  /** Like process(), but with the opportunity to overwrite the activation of
      a specific layer.
      @param injection the input that is clamped at layer injectInLayer
      @param injectInLayer the injection is clamped at this layer
   */
  virtual const matrix::Matrix processX(const matrix::Matrix& input,
                                        const matrix::Matrix& injection,
                                        unsigned int injectInLayer);

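  /* Sketch of passive processing (illustrative; assumes matrix::Matrix is
     constructed as Matrix(rows, cols) and exposes val(row, col), as in
     selforg's matrix library):

       matrix::Matrix x(net.getInputDim(), 1);    // column vector of inputs
       x.val(0,0) = 0.5;                          // ... fill in sensor values
       matrix::Matrix out = net.process(x);       // activations stored internally

       matrix::Matrix h = net.getLayerOutput(0);  // stored hidden activation
       matrix::Matrix out2 = net.processX(x, h, 0); // clamp h at layer 0
  */
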
  /// damps the weights and the biases by multiplying them with (1-damping)
  virtual void damp(double damping);

  /** Response matrix of the neural network (for the current activation, see process())
  \f[  J_{ij} = \frac{\partial y_i}{\partial x_j} \f]
  \f[  J = G_n' W_n G_{n-1}' W_{n-1} \cdots G_1' W_1 \f]
  where \f$W_n\f$ is the weight matrix of layer n and
  \f$G_n'\f$ is a diagonal matrix with \f$ G'_{ii} = g'_i \f$ on the diagonal.
  */
  virtual const matrix::Matrix& response() const;

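  /* Sketch: the response matrix refers to the most recent process() call. A
     finite-difference check of one Jacobian column (illustrative; assumes the
     matrix operators used below exist as in selforg's matrix library):

       matrix::Matrix y0 = net.process(x);
       matrix::Matrix J  = net.response();   // copy it before reprocessing
       double eps = 1e-6;
       matrix::Matrix xp = x;
       xp.val(0,0) += eps;
       matrix::Matrix col0 = (net.process(xp) - y0) * (1.0/eps);
       // col0 should approximate the first column of J
  */
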
  /** Like response(), but only a range of layers is considered.
      The bypass is not considered here.
      @param from index of the layer to start: -1 at input, 0 first hidden layer, ...
      @param to index of the layer to stop: -1 last layer, 0 first hidden layer, ...
   */
  virtual matrix::Matrix responsePart(int from, int to) const;


  /** Linear response matrix of the neural network
  \f[  R = W_n W_{n-1} \cdots W_1 \f]
  where \f$W_n\f$ is the weight matrix of layer n.
  */
  virtual const matrix::Matrix& responseLinear() const;

  /** Backpropagation of a vector (error) through the network.
      The storage for the intermediate values (errors, zetas) does not need to be given.
      The errors (layerwise) are at the output of the neurons
      (index 0 is at the input level, the output of layer 0 has index 1, and so on).
      The zetas (layerwise) are the values inside the neurons that
      arise when backpropagating the error signal (zeta[0] is at layer 0).
      @return errors[0] (result of the backpropagation)
   */
  virtual const matrix::Matrix backpropagation(const matrix::Matrix& error,
                                               matrix::Matrices* errors = 0,
                                               matrix::Matrices* zetas = 0) const;

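  /* Sketch of backpropagating an output error (illustrative; matrix::Matrices
     is assumed to be a vector of matrices as used by this interface):

       net.process(x);                           // activations must be current
       matrix::Matrix e(net.getOutputDim(), 1);  // error at the output
       matrix::Matrices errors, zetas;
       matrix::Matrix e_in = net.backpropagation(e, &errors, &zetas);
       // e_in equals errors[0], the error arriving at the input level
  */
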
  /** Like backpropagation(), but with special features: we can start from any layer
      and the bypass discounting can be used (see the dissertation of Georg Martius).
      WARNING: the errors and zetas above startWithLayer are undefined.
      @param startWithLayer the error is clamped at this layer and the processing starts there
        (-1: output layer)
      @see backpropagation
   */
  virtual const matrix::Matrix backpropagationX(const matrix::Matrix& error,
                                                matrix::Matrices* errors = 0,
                                                matrix::Matrices* zetas = 0,
                                                int startWithLayer = -1) const;


  /** Backprojection of a vector (error) through the network.
      The storage for the intermediate values (errors, zetas) does not need to be given.
      The errors (layerwise) are at the output of the neurons
      (index 0 is at the input level, the output of layer 0 has index 1, and so on).
      The zetas (layerwise) are the values inside the neurons that
      arise when backprojecting the error signal (zeta[0] is at layer 0).
      @return errors[0] (result of the backprojection)
   */
  virtual const matrix::Matrix backprojection(const matrix::Matrix& error,
                                              matrix::Matrices* errors = 0,
                                              matrix::Matrices* zetas = 0) const;

  /** Forward propagation of a vector (error) through the network.
      The storage for the intermediate values (errors, zetas) does not need to be given.
      The errors (layerwise) are at the output of the neurons
      (index 0 is at the input level, i.e. the error itself; the output of layer 0 has index 1, and so on).
      The zetas (layerwise) are the values inside the neurons that
      arise when forward-propagating the error signal (zeta[0] is at layer 0).
      @return errors[layernum] (result of the forward propagation)
   */
  virtual const matrix::Matrix forwardpropagation(const matrix::Matrix& error,
                                                  matrix::Matrices* errors = 0,
                                                  matrix::Matrices* zetas = 0) const;

  /** Forward projection of a vector (error) through the network.
      The storage for the intermediate values (errors, zetas) does not need to be given.
      The errors (layerwise) are at the output of the neurons
      (index 0 is at the input level, i.e. the error itself; the output of layer 0 has index 1, and so on).
      The zetas (layerwise) are the values inside the neurons that
      arise when forward-projecting the error signal (zeta[0] is at layer 0).
      @return errors[layernum] (result of the forward projection)
   */
  virtual const matrix::Matrix forwardprojection(const matrix::Matrix& error,
                                                 matrix::Matrices* errors = 0,
                                                 matrix::Matrices* zetas = 0) const;

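  /* Sketch of pushing an input-level error forward (illustrative):

       net.process(x);
       matrix::Matrix e(net.getInputDim(), 1);  // error at the input level
       matrix::Matrices errors, zetas;
       matrix::Matrix e_out = net.forwardpropagation(e, &errors, &zetas);
       // e_out equals errors[net.getLayerNum()], the error at the output
  */
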
  /// returns the number of input neurons
  virtual unsigned int getInputDim() const {
    return weights[0].getN();
  }
  /// returns the number of output neurons
  virtual unsigned int getOutputDim() const {
    return (weights.rbegin())->getM();
  }

  /** Returns the activation of the given layer. Layer 0 is the first hidden layer.
      Negative values count from the end (-1 is the last layer).
   */
  virtual const matrix::Matrix& getLayerOutput(int layer) const {
    if(layer<0) layer = layers.size() + layer;
    assert(layer>=0 && layer < (int)layers.size());
    return y[layer];
  }

  /// total number of layers (1 means no hidden layer)
  virtual unsigned int getLayerNum() const {
    return layers.size();
  }

  /// layer 0 is the first hidden layer
  virtual const Layer& getLayer(unsigned int layer) const {
    assert(layer < layers.size());
    return layers[layer];
  }

  /// layer 0 is the first hidden layer
  virtual Layer& getLayer(unsigned int layer) {
    assert(layer < layers.size());
    return layers[layer];
  }

  /** Weight matrix 0 connects the input with the first hidden layer.
      Negative values count from the end (-1 is the last layer).
  */
  virtual const matrix::Matrix& getWeights(int to_layer) const {
    if(to_layer<0) to_layer = weights.size() + to_layer;
    assert(to_layer>=0 && to_layer < (int)weights.size());
    return weights[to_layer];
  }

  /** Weight matrix 0 connects the input with the first hidden layer.
      Negative values count from the end (-1 is the last layer).
  */
  virtual matrix::Matrix& getWeights(int to_layer) {
    if(to_layer<0) to_layer = weights.size() + to_layer;
    assert(to_layer>=0 && to_layer < (int)weights.size());
    return weights[to_layer];
  }

00208 
00209   virtual const matrix::Matrix& getByPass() const  {
00210     assert(useBypass);
00211     return bypassWeights;
00212   }
00213 
00214   virtual matrix::Matrix& getByPass() {
00215     assert(useBypass);
00216     return bypassWeights;
00217   }
00218 
  /** Note: layer 0 is the first hidden layer.
      Negative values count from the end (-1 is the last layer).
  */
  virtual const matrix::Matrix& getBias(int of_layer) const {
    if(of_layer<0) of_layer = bias.size() + of_layer;
    assert(of_layer>=0 && of_layer < (int)bias.size());
    return bias[of_layer];
  }

  /** Note: layer 0 is the first hidden layer.
      Negative values count from the end (-1 is the last layer).
  */
  virtual matrix::Matrix& getBias(int of_layer) {
    if(of_layer<0) of_layer = bias.size() + of_layer;
    assert(of_layer>=0 && of_layer < (int)bias.size());
    return bias[of_layer];
  }

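  /* Sketch: the accessors allow direct manipulation of the parameters, e.g.
     a manual equivalent of damp() (illustrative; toMult is assumed to be the
     in-place scalar multiplication of selforg's matrix library):

       for(unsigned int i=0; i < net.getLayerNum(); i++){
         net.getWeights(i).toMult(1 - damping);
         net.getBias(i).toMult(1 - damping);
       }
  */
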
  /**************  STOREABLE **********************************/
  /// stores the layer binary into the file stream
  bool store(FILE* f) const;
  /// restores the layer binary from the file stream
  bool restore(FILE* f);

  /// writes the layer as ASCII into the file stream (not in the Storeable interface)
  bool write(FILE* f) const;

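  /* Sketch of saving the network (illustrative; uses only the declared
     interface plus stdio):

       FILE* f = fopen("net.bin", "wb");
       if(f){ net.store(f); fclose(f); }   // binary format
       FILE* g = fopen("net.txt", "w");
       if(g){ net.write(g); fclose(g); }   // human-readable dump
  */
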
protected:
  /// actually calculates the Jacobian and stores it in L, see response()
  virtual void calcResponseIntern();


protected:
  std::vector<Layer> layers;
  std::vector<matrix::Matrix> weights;
  std::vector<matrix::Matrix> bias;
  bool useBypass;
  matrix::Matrix bypassWeights;

  /*** storage variables ****/
  matrix::Matrix input;
  matrix::Matrices y;  // activations
  matrix::Matrices z;  // potentials (pre-activation values)
  matrix::Matrices gp; // g' (derivatives of the activation functions)

  matrix::Matrix L;    // Jacobian (response) matrix
  matrix::Matrix R;    // linear response matrix

  double lambda;       // regularisation value for the pseudoinverse
  bool initialised;
};

#endif