
Source Code for Module Bio.NeuralNetwork.BackPropagation.Layer

# This code is part of the Biopython distribution and governed by its
# license.  Please see the LICENSE file that should have been included
# as part of this package.
#

"""Model a single layer in a neural network.

These classes deal with the layers in the neural network (i.e. the
input layer, hidden layers and the output layer).
"""
# standard library
import math
import random

from Bio._py3k import range


def logistic_function(value):
    """Transform the value with the logistic function.

    XXX This is in the wrong place -- I need to find a place to put it
    that makes sense.
    """
    return 1.0 / (1.0 + math.exp(-value))
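
# A quick illustration (not part of the original module): the logistic
# function squashes any real value into the open interval (0, 1), for
# example:
#
#     >>> logistic_function(0.0)
#     0.5
#     >>> logistic_function(-4.0) < 0.5 < logistic_function(4.0)
#     True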


class AbstractLayer(object):
    """Abstract base class for all layers."""

    def __init__(self, num_nodes, has_bias_node):
        """Initialize the layer.

        Arguments:

        o num_nodes -- The number of nodes that are contained in this layer.

        o has_bias_node -- Specify whether or not this layer has a bias
        node. This node is not included in the number of nodes in the
        layer, but is used in constructing and dealing with the network.
        """
        # specify all of the nodes in the layer
        if has_bias_node:
            lower_range = 0
        else:
            lower_range = 1

        self.nodes = list(range(lower_range, num_nodes + 1))

        self.weights = {}

    def __str__(self):
        """Debugging output."""
        return "weights: %s" % self.weights

    def set_weight(self, this_node, next_node, value):
        """Set a weight value from one node to the next.

        If weights are not explicitly set, they will be initialized to
        random values to start with.
        """
        if (this_node, next_node) not in self.weights:
            raise ValueError("Invalid node values passed.")

        self.weights[(this_node, next_node)] = value
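
# An illustrative sketch of the weight bookkeeping above (not part of the
# original module; 'layer' is a hypothetical connected instance): weights
# are keyed by (from_node, to_node) tuples, so one might see
#
#     >>> layer.weights
#     {(0, 1): 0.73, (0, 2): -1.28, (1, 1): 0.05, ...}
#     >>> layer.set_weight(0, 1, 0.25)     # known pair -- fine
#     >>> layer.set_weight(5, 9, 0.25)     # unknown pair
#     Traceback (most recent call last):
#         ...
#     ValueError: Invalid node values passed.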


class InputLayer(AbstractLayer):
    def __init__(self, num_nodes, next_layer):
        """Initialize the input layer.

        Arguments:

        o num_nodes -- The number of nodes in the input layer.

        o next_layer -- The next layer in the neural network this is
        connected to.
        """
        AbstractLayer.__init__(self, num_nodes, 1)

        self._next_layer = next_layer

        # set up the weights, drawn uniformly from [-2.0, 2.0)
        # (random.uniform rather than random.randrange, so the initial
        # weights are real-valued instead of integers)
        self.weights = {}
        for own_node in self.nodes:
            for other_node in self._next_layer.nodes:
                self.weights[(own_node, other_node)] = \
                    random.uniform(-2.0, 2.0)

        # set up the weight changes
        self.weight_changes = {}
        for own_node in self.nodes:
            for other_node in self._next_layer.nodes:
                self.weight_changes[(own_node, other_node)] = 0.0

        # set up the calculated values for each node -- these will
        # actually just be set from inputs into the network.
        self.values = {}
        for node in self.nodes:
            # set the bias node -- always has a value of 1
            if node == 0:
                self.values[0] = 1
            else:
                self.values[node] = 0

    def update(self, inputs):
        """Update the values of the nodes using given inputs.

        Arguments:

        o inputs -- A list of inputs into the network -- the number of
        inputs must match the number of non-bias nodes in the layer.
        """
        if len(inputs) != len(self.values) - 1:
            raise ValueError("Inputs do not match input layer nodes.")

        # set the node values from the inputs
        for input_num in range(len(inputs)):
            self.values[input_num + 1] = inputs[input_num]

        # propagate the update to the next layer
        self._next_layer.update(self)

    def backpropagate(self, outputs, learning_rate, momentum):
        """Recalculate all weights based on the last round of prediction.

        Arguments:

        o outputs -- The output info we are using to calculate error.

        o learning_rate -- The learning rate of the network.

        o momentum -- The amount of weight to place on the previous weight
        change.
        """
        # first backpropagate to the next layers
        next_errors = self._next_layer.backpropagate(outputs, learning_rate,
                                                     momentum)

        for this_node in self.nodes:
            for next_node in self._next_layer.nodes:
                error_deriv = (next_errors[next_node] *
                               self.values[this_node])

                delta = (learning_rate * error_deriv +
                         momentum * self.weight_changes[(this_node, next_node)])

                # apply the change to the weight
                self.weights[(this_node, next_node)] += delta

                # remember the weight change for next time
                self.weight_changes[(this_node, next_node)] = delta
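
# An illustrative sketch of the momentum update rule used above (not part
# of the original module; the numbers are arbitrary): each weight moves by
#
#     delta = learning_rate * error_deriv + momentum * previous_delta
#
# so with learning_rate=0.5, momentum=0.1, error_deriv=0.2 and a previous
# delta of 0.3, the weight changes by 0.5 * 0.2 + 0.1 * 0.3 = 0.13 on
# this pass, and 0.13 becomes the previous delta for the next pass.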


class HiddenLayer(AbstractLayer):
    def __init__(self, num_nodes, next_layer, activation=logistic_function):
        """Initialize a hidden layer.

        Arguments:

        o num_nodes -- The number of nodes in this hidden layer.

        o next_layer -- The next layer in the neural network that this
        is connected to.

        o activation -- The transformation function used to transform
        predicted values.
        """
        AbstractLayer.__init__(self, num_nodes, 1)

        self._next_layer = next_layer
        self._activation = activation

        # set up the weights, drawn uniformly from [-2.0, 2.0)
        self.weights = {}
        for own_node in self.nodes:
            for other_node in self._next_layer.nodes:
                self.weights[(own_node, other_node)] = \
                    random.uniform(-2.0, 2.0)

        # set up the weight changes
        self.weight_changes = {}
        for own_node in self.nodes:
            for other_node in self._next_layer.nodes:
                self.weight_changes[(own_node, other_node)] = 0.0

        # set up the calculated values for each node
        self.values = {}
        for node in self.nodes:
            # bias node
            if node == 0:
                self.values[node] = 1
            else:
                self.values[node] = 0

    def update(self, previous_layer):
        """Update the values of nodes from the previous layer info.

        Arguments:

        o previous_layer -- The previous layer in the network.
        """
        # update each node in this layer, skipping the bias node
        for update_node in self.nodes[1:]:
            # sum up the weighted inputs from the previous layer
            # ('total' avoids shadowing the built-in sum)
            total = 0.0
            for node in previous_layer.nodes:
                total += (previous_layer.values[node] *
                          previous_layer.weights[(node, update_node)])

            self.values[update_node] = self._activation(total)

        # propagate the update to the next layer
        self._next_layer.update(self)

    def backpropagate(self, outputs, learning_rate, momentum):
        """Recalculate all weights based on the last round of prediction.

        Arguments:

        o outputs -- The output values we are using to see how good our
        network is at predicting things.

        o learning_rate -- The learning rate of the network.

        o momentum -- The amount of weight to place on the previous weight
        change.
        """
        # first backpropagate to the next layers
        next_errors = self._next_layer.backpropagate(outputs, learning_rate,
                                                     momentum)

        # --- update the weights
        for this_node in self.nodes:
            for next_node in self._next_layer.nodes:
                error_deriv = (next_errors[next_node] *
                               self.values[this_node])

                delta = (learning_rate * error_deriv +
                         momentum * self.weight_changes[(this_node, next_node)])

                # apply the change to the weight
                self.weights[(this_node, next_node)] += delta

                # remember the weight change for next time
                self.weight_changes[(this_node, next_node)] = delta

        # --- calculate error terms
        errors = {}
        for error_node in self.nodes:
            # get the error info propagated from the next layer
            previous_error = 0.0
            for next_node in self._next_layer.nodes:
                previous_error += (next_errors[next_node] *
                                   self.weights[(error_node, next_node)])

            # the correction factor z * (1 - z) is the derivative of the
            # default logistic activation function
            corr_factor = (self.values[error_node] *
                           (1 - self.values[error_node]))

            # calculate the error
            errors[error_node] = previous_error * corr_factor

        return errors
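
# A quick check of the correction factor used above (not part of the
# original module): for the logistic function, the derivative is
# sigma(x) * (1 - sigma(x)), so at x = 0, where sigma(0) = 0.5, it is
# 0.5 * (1 - 0.5) = 0.25 -- the error signal is strongest for mid-range
# activations and vanishes as a node saturates towards 0 or 1.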


class OutputLayer(AbstractLayer):
    def __init__(self, num_nodes, activation=logistic_function):
        """Initialize the Output Layer.

        Arguments:

        o num_nodes -- The number of nodes in this layer. This corresponds
        to the number of outputs in the neural network.

        o activation -- The transformation function used to transform
        predicted values.
        """
        AbstractLayer.__init__(self, num_nodes, 0)

        self._activation = activation

        self.values = {}
        for node in self.nodes:
            self.values[node] = 0

    def update(self, previous_layer):
        """Update the value of output nodes from the previous layer.

        Arguments:

        o previous_layer -- The hidden layer preceding this.
        """
        # update all of the nodes in this layer
        for update_node in self.nodes:
            # sum up the contribution from all of the previous inputs
            # ('total' avoids shadowing the built-in sum)
            total = 0.0
            for node in previous_layer.nodes:
                total += (previous_layer.values[node] *
                          previous_layer.weights[(node, update_node)])

            self.values[update_node] = self._activation(total)

    def backpropagate(self, outputs, learning_rate, momentum):
        """Calculate the backpropagation error at a given node.

        This calculates the error term using the formula:

        p = (t - z) z (1 - z)

        where z is the calculated value for the node, and t is the
        real (expected) value.

        Arguments:

        o outputs -- The list of output values we use to calculate the
        errors in our predictions.
        """
        errors = {}
        for node in self.nodes:
            calculated_value = self.values[node]
            real_value = outputs[node - 1]

            errors[node] = ((real_value - calculated_value) *
                            calculated_value *
                            (1 - calculated_value))

        return errors
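
# A worked instance of the error term above (not part of the original
# module; the numbers are arbitrary): with a target t = 1.0 and a
# predicted z = 0.8,
#
#     p = (1.0 - 0.8) * 0.8 * (1 - 0.8) = 0.2 * 0.8 * 0.2 = 0.032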

    def get_error(self, real_value, node_number):
        """Return the error (half the squared difference) at a node."""
        predicted_value = self.values[node_number]
        return 0.5 * math.pow((real_value - predicted_value), 2)

    def set_weight(self, this_node, next_node, value):
        """Raise an error -- the output layer has no outgoing weights."""
        raise NotImplementedError("Can't set weights for the output layer")
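
# A minimal end-to-end sketch (illustrative, not part of the original
# module): wire up a network with two inputs, two hidden nodes and one
# output, run a forward pass and one round of backpropagation. The input
# values, target, learning rate and momentum are arbitrary example numbers.
if __name__ == "__main__":
    output_layer = OutputLayer(1)
    hidden_layer = HiddenLayer(2, output_layer)
    input_layer = InputLayer(2, hidden_layer)

    # forward pass: values propagate input -> hidden -> output
    input_layer.update([0.2, 0.8])
    print("prediction: %s" % output_layer.values)

    # backward pass: errors propagate output -> hidden -> input,
    # updating the weights along the way
    input_layer.backpropagate([1.0], learning_rate=0.5, momentum=0.1)
    print("error: %s" % output_layer.get_error(1.0, 1))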