Source Code for Module Bio.NeuralNetwork.BackPropagation.Layer

# This code is part of the Biopython distribution and governed by its
# license.  Please see the LICENSE file that should have been included
# as part of this package.
#

  6  """Model a single layer in a nueral network. 
  7   
  8  These classes deal with a layers in the neural network (ie. the input layer, 
  9  hidden layers and the output layer). 
 10  """ 
# standard library
import math
import random

from Bio._py3k import range


def logistic_function(value):
    """Transform the value with the logistic function."""
    # TODO: This is in the wrong place -- I need to find a place to put it
    # that makes sense.
    return 1.0 / (1.0 + math.exp(-value))


class AbstractLayer(object):
    """Abstract base class for all layers."""

    def __init__(self, num_nodes, has_bias_node):
        """Initialize the layer.

        Arguments:
         - num_nodes -- The number of nodes that are contained in this layer.
         - has_bias_node -- Specify whether or not this layer has a bias
           node. This node is not included in the number of nodes in the
           layer, but is used in constructing and dealing with the network.

        """
        # specify all of the nodes in the layer
        if has_bias_node:
            lower_range = 0
        else:
            lower_range = 1

        self.nodes = list(range(lower_range, num_nodes + 1))

        self.weights = {}

    def __str__(self):
        """Debugging output."""
        return "weights: %s" % self.weights

    def set_weight(self, this_node, next_node, value):
        """Set a weight value from one node to the next.

        If weights are not explicitly set, they will be initialized to
        random values to start with.
        """
        if (this_node, next_node) not in self.weights:
            raise ValueError("Invalid node values passed.")

        self.weights[(this_node, next_node)] = value


class InputLayer(AbstractLayer):
    """The input layer of the neural network."""

    def __init__(self, num_nodes, next_layer):
        """Initialize the input layer.

        Arguments:
         - num_nodes -- The number of nodes in the input layer.
         - next_layer -- The next layer in the neural network this is
           connected to.

        """
        AbstractLayer.__init__(self, num_nodes, 1)

        self._next_layer = next_layer

        # set up the weights with random float starting values
        self.weights = {}
        for own_node in self.nodes:
            for other_node in self._next_layer.nodes:
                self.weights[(own_node, other_node)] = \
                    random.uniform(-2.0, 2.0)

        # set up the weight changes
        self.weight_changes = {}
        for own_node in self.nodes:
            for other_node in self._next_layer.nodes:
                self.weight_changes[(own_node, other_node)] = 0.0

        # set up the calculated values for each node -- these will
        # actually just be set from inputs into the network.
        self.values = {}
        for node in self.nodes:
            # set the bias node -- always has a value of 1
            if node == 0:
                self.values[0] = 1
            else:
                self.values[node] = 0

    def update(self, inputs):
        """Update the values of the nodes using given inputs.

        Arguments:
         - inputs -- A list of inputs into the network -- this must be
           equal in length to the number of nodes in the layer.

        """
        if len(inputs) != len(self.values) - 1:
            raise ValueError("Inputs do not match input layer nodes.")

        # set the node values from the inputs
        for input_num in range(len(inputs)):
            self.values[input_num + 1] = inputs[input_num]

        # propagate the update to the next layer
        self._next_layer.update(self)

    def backpropagate(self, outputs, learning_rate, momentum):
        """Recalculate all weights based on the last round of prediction.

        Arguments:
         - outputs -- The output information we are using to calculate
           the error.
         - learning_rate -- The learning rate of the network.
         - momentum -- The amount of weight to place on the previous
           weight change.

        """
        # first backpropagate to the next layers
        next_errors = self._next_layer.backpropagate(outputs, learning_rate,
                                                     momentum)

        for this_node in self.nodes:
            for next_node in self._next_layer.nodes:
                error_deriv = (next_errors[next_node] *
                               self.values[this_node])

                delta = (learning_rate * error_deriv +
                         momentum * self.weight_changes[(this_node, next_node)])

                # apply the change to the weight
                self.weights[(this_node, next_node)] += delta

                # remember the weight change for next time
                self.weight_changes[(this_node, next_node)] = delta


class HiddenLayer(AbstractLayer):
    """A hidden layer between the input and output layers."""

    def __init__(self, num_nodes, next_layer, activation=logistic_function):
        """Initialize a hidden layer.

        Arguments:
         - num_nodes -- The number of nodes in this hidden layer.
         - next_layer -- The next layer in the neural network that this
           is connected to.
         - activation -- The transformation function used to transform
           predicted values.

        """
        AbstractLayer.__init__(self, num_nodes, 1)

        self._next_layer = next_layer
        self._activation = activation

        # set up the weights with random float starting values
        self.weights = {}
        for own_node in self.nodes:
            for other_node in self._next_layer.nodes:
                self.weights[(own_node, other_node)] = \
                    random.uniform(-2.0, 2.0)

        # set up the weight changes
        self.weight_changes = {}
        for own_node in self.nodes:
            for other_node in self._next_layer.nodes:
                self.weight_changes[(own_node, other_node)] = 0.0

        # set up the calculated values for each node
        self.values = {}
        for node in self.nodes:
            # bias node
            if node == 0:
                self.values[node] = 1
            else:
                self.values[node] = 0

    def update(self, previous_layer):
        """Update the values of nodes from the previous layer info.

        Arguments:
         - previous_layer -- The previous layer in the network.

        """
        # update each node in this layer
        for update_node in self.nodes[1:]:
            # sum up the weighted inputs from the previous layer
            weighted_sum = 0.0
            for node in previous_layer.nodes:
                weighted_sum += (previous_layer.values[node] *
                                 previous_layer.weights[(node, update_node)])

            self.values[update_node] = self._activation(weighted_sum)

        # propagate the update to the next layer
        self._next_layer.update(self)

    def backpropagate(self, outputs, learning_rate, momentum):
        """Recalculate all weights based on the last round of prediction.

        Arguments:
         - outputs -- The output values we are using to see how good our
           network is at predicting things.
         - learning_rate -- The learning rate of the network.
         - momentum -- The amount of weight to place on the previous
           weight change.

        """
        # first backpropagate to the next layers
        next_errors = self._next_layer.backpropagate(outputs, learning_rate,
                                                     momentum)

        # --- update the weights
        for this_node in self.nodes:
            for next_node in self._next_layer.nodes:
                error_deriv = (next_errors[next_node] *
                               self.values[this_node])

                delta = (learning_rate * error_deriv +
                         momentum * self.weight_changes[(this_node, next_node)])

                # apply the change to the weight
                self.weights[(this_node, next_node)] += delta

                # remember the weight change for next time
                self.weight_changes[(this_node, next_node)] = delta

        # --- calculate error terms
        errors = {}
        for error_node in self.nodes:
            # get the error info propagated from the next layer
            previous_error = 0.0
            for next_node in self._next_layer.nodes:
                previous_error += (next_errors[next_node] *
                                   self.weights[(error_node, next_node)])

            # get the correction factor -- the derivative of the
            # logistic function, z * (1 - z)
            corr_factor = (self.values[error_node] *
                           (1 - self.values[error_node]))

            # calculate the error
            errors[error_node] = previous_error * corr_factor

        return errors


class OutputLayer(AbstractLayer):
    """The final output layer of the neural network."""

    def __init__(self, num_nodes, activation=logistic_function):
        """Initialize the Output Layer.

        Arguments:
         - num_nodes -- The number of nodes in this layer. This corresponds
           to the number of outputs in the neural network.
         - activation -- The transformation function used to transform
           predicted values.

        """
        AbstractLayer.__init__(self, num_nodes, 0)

        self._activation = activation

        self.values = {}
        for node in self.nodes:
            self.values[node] = 0

    def update(self, previous_layer):
        """Update the value of output nodes from the previous layers.

        Arguments:
         - previous_layer -- The hidden layer preceding this one.

        """
        # update all of the nodes in this layer
        for update_node in self.nodes:
            # sum up the contribution from all of the previous inputs
            weighted_sum = 0.0
            for node in previous_layer.nodes:
                weighted_sum += (previous_layer.values[node] *
                                 previous_layer.weights[(node, update_node)])

            self.values[update_node] = self._activation(weighted_sum)

    def backpropagate(self, outputs, learning_rate, momentum):
        """Calculate the backpropagation error at a given node.

        This calculates the error term using the formula:

        p = (t - z) z (1 - z)

        where z is the calculated value for the node, and t is the
        real (expected) value. The learning_rate and momentum arguments
        are accepted for compatibility with the other layers, but are
        not used here.

        Arguments:
         - outputs -- The list of output values we use to calculate the
           errors in our predictions.

        """
        errors = {}
        for node in self.nodes:
            calculated_value = self.values[node]
            real_value = outputs[node - 1]

            errors[node] = ((real_value - calculated_value) *
                            calculated_value *
                            (1 - calculated_value))

        return errors

    def get_error(self, real_value, node_number):
        """Return the error value at a particular node."""
        predicted_value = self.values[node_number]
        return 0.5 * math.pow(real_value - predicted_value, 2)

    def set_weight(self, this_node, next_node, value):
        """Raise an error; the output layer has no outgoing weights."""
        raise NotImplementedError("Can't set weights for the output layer")
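
As a minimal usage sketch (not part of the module itself): the layers are
built from the output back to the input, since each layer needs its
successor at construction time, and a training step is one forward update
followed by one backpropagate call. The sizes and training values below
are arbitrary illustrations.

from Bio.NeuralNetwork.BackPropagation.Layer import (
    InputLayer, HiddenLayer, OutputLayer)

# build a 2-input, 3-hidden-node, 1-output network, back to front
output = OutputLayer(1)
hidden = HiddenLayer(3, output)
inputs = InputLayer(2, hidden)

# one training step on a single example
inputs.update([1, 0])                                   # forward pass
inputs.backpropagate([1], learning_rate=0.5, momentum=0.1)

# predictions are read from the output layer's node values
inputs.update([1, 0])
prediction = output.values[1]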