Package Bio :: Module MaxEntropy

Source Code for Module Bio.MaxEntropy

# Copyright 2001 by Jeffrey Chang.  All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license.  Please see the LICENSE file that should have been included
# as part of this package.

"""Maximum Entropy code.

Uses Improved Iterative Scaling.
"""
# TODO Define terminology
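# A brief, informal sketch of the terminology used below (inferred from how
# the code uses these names, not a formal definition):
#   * A "feature function" f_i(x, y) takes a training instance x and a
#     candidate class y and returns a number, typically 0 or 1.
#   * Each feature function has a weight alpha_i, learned by Improved
#     Iterative Scaling (IIS).
#   * A trained model scores a class by the weighted feature sum
#     SUM_i alpha_i * f_i(x, y) (see calculate below).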

from __future__ import print_function
from functools import reduce

from Bio._py3k import map

import numpy


class MaxEntropy(object):
    """Holds information for a Maximum Entropy classifier.

    Members:
    classes      List of the possible classes of data.
    alphas       List of the weights for each feature.
    feature_fns  List of the feature functions.

    """

    def __init__(self):
        self.classes = []
        self.alphas = []
        self.feature_fns = []

def calculate(me, observation):
    """calculate(me, observation) -> list of log probs

    Calculate the log of the probability for each class.  me is a
    MaxEntropy object that has been trained.  observation is a vector
    representing the observed data.  The return value is a list of
    unnormalized log probabilities for each class.

    """
    scores = []
    assert len(me.feature_fns) == len(me.alphas)
    for klass in me.classes:
        lprob = 0.0
        for fn, alpha in zip(me.feature_fns, me.alphas):
            lprob += fn(observation, klass) * alpha
        scores.append(lprob)
    return scores

def classify(me, observation):
    """classify(me, observation) -> class

    Classify an observation into a class.

    """
    scores = calculate(me, observation)
    max_score, klass = scores[0], me.classes[0]
    for i in range(1, len(scores)):
        if scores[i] > max_score:
            max_score, klass = scores[i], me.classes[i]
    return klass

def _eval_feature_fn(fn, xs, classes):
    """_eval_feature_fn(fn, xs, classes) -> dict of values

    Evaluate a feature function on every instance of the training set
    and class.  fn is a callback function that takes two parameters: a
    training instance and a class.  Return a dictionary of (training
    set index, class index) -> non-zero value.  Values of 0 are not
    stored in the dictionary.

    """
    values = {}
    for i in range(len(xs)):
        for j in range(len(classes)):
            f = fn(xs[i], classes[j])
            if f != 0:
                values[(i, j)] = f
    return values

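# As an informal illustration (hypothetical feature, not the module's own
# data): with two classes and a feature function that returns 1 only for
# training instance 1, the cached dictionary would be {(1, 0): 1, (1, 1): 1};
# every other (instance, class) pair is omitted because its value is 0.
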
def _calc_empirical_expects(xs, ys, classes, features):
    """_calc_empirical_expects(xs, ys, classes, features) -> list of expectations

    Calculate the expectation of each feature function from the data.  This is
    the constraint for the maximum entropy distribution.  Return a
    list of expectations, parallel to the list of features.

    """
    # E[f_i] = SUM_x,y P(x, y) f_i(x, y)
    #        = 1/N SUM_j f_i(x_j, y_j)    (averaged over the N training pairs)
    class2index = {}
    for index, key in enumerate(classes):
        class2index[key] = index
    ys_i = [class2index[y] for y in ys]

    expect = []
    N = len(xs)
    for feature in features:
        s = 0
        for i in range(N):
            s += feature.get((i, ys_i[i]), 0)
        expect.append(float(s) / N)
    return expect

def _calc_model_expects(xs, classes, features, alphas):
    """_calc_model_expects(xs, classes, features, alphas) -> list of expectations.

    Calculate the expectation of each feature from the model.  This is
    not used in maximum entropy training, but provides a good function
    for debugging.

    """
    # SUM_X P(x) SUM_Y P(Y|X) F(X, Y)
    # = 1/N SUM_X SUM_Y P(Y|X) F(X, Y)
    p_yx = _calc_p_class_given_x(xs, classes, features, alphas)

    expects = []
    for feature in features:
        sum = 0.0
        for (i, j), f in feature.items():
            sum += p_yx[i][j] * f
        expects.append(sum / len(xs))
    return expects

def _calc_p_class_given_x(xs, classes, features, alphas):
    """_calc_p_class_given_x(xs, classes, features, alphas) -> matrix

    Calculate P(y|x), where y is the class and x is an instance from
    the training set.  Return an XS x CLASSES matrix of probabilities.

    """
    prob_yx = numpy.zeros((len(xs), len(classes)))

    # Calculate log P(y, x).
    assert len(features) == len(alphas)
    for feature, alpha in zip(features, alphas):
        for (x, y), f in feature.items():
            prob_yx[x][y] += alpha * f
    # Exponentiate to get (unnormalized) P(y, x).
    prob_yx = numpy.exp(prob_yx)
    # Divide out the probability over each class, so we get P(y|x).
    for i in range(len(xs)):
        z = sum(prob_yx[i])
        prob_yx[i] = prob_yx[i] / z
    return prob_yx

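# In other words, each row of the matrix returned above holds
#     P(y|x) = exp(SUM_i alpha_i * f_i(x, y)) / Z(x),
# where Z(x) = SUM_y exp(SUM_i alpha_i * f_i(x, y)) is the per-instance
# normalizer computed in the final loop.
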
def _calc_f_sharp(N, nclasses, features):
    """_calc_f_sharp(N, nclasses, features) -> matrix of f sharp values."""
    # f#(x, y) = SUM_i f_i(x, y)
    f_sharp = numpy.zeros((N, nclasses))
    for feature in features:
        for (i, j), f in feature.items():
            f_sharp[i][j] += f
    return f_sharp

def _iis_solve_delta(N, feature, f_sharp, empirical, prob_yx,
                     max_newton_iterations, newton_converge):
    """Solve for the IIS update delta of one feature using Newton's method (PRIVATE)."""
    # Find the delta_i satisfying the IIS update equation:
    #     empirical - SUM_x P(x) SUM_c P(c|x) f_i(x, c) e^[delta_i * f#(x, c)] = 0
    delta = 0.0
    iters = 0
    while iters < max_newton_iterations:    # iterate for Newton's method
        f_newton = df_newton = 0.0          # evaluate the function and derivative
        for (i, j), f in feature.items():
            prod = prob_yx[i][j] * f * numpy.exp(delta * f_sharp[i][j])
            f_newton += prod
            df_newton += prod * f_sharp[i][j]
        f_newton, df_newton = empirical - f_newton / N, -df_newton / N

        ratio = f_newton / df_newton
        delta -= ratio
        if numpy.fabs(ratio) < newton_converge:   # converged
            break
        iters = iters + 1
    else:
        raise RuntimeError("Newton's method did not converge")
    return delta

def _train_iis(xs, classes, features, f_sharp, alphas, e_empirical,
               max_newton_iterations, newton_converge):
    """Do one iteration of hill climbing to find better alphas (PRIVATE)."""
    # This is a good function to parallelize.

    # Pre-calculate P(y|x)
    p_yx = _calc_p_class_given_x(xs, classes, features, alphas)

    N = len(xs)
    newalphas = alphas[:]
    for i in range(len(alphas)):
        delta = _iis_solve_delta(N, features[i], f_sharp, e_empirical[i], p_yx,
                                 max_newton_iterations, newton_converge)
        newalphas[i] += delta
    return newalphas

def train(training_set, results, feature_fns, update_fn=None,
          max_iis_iterations=10000, iis_converge=1.0e-5,
          max_newton_iterations=100, newton_converge=1.0e-10):
    """Train a maximum entropy classifier and return a MaxEntropy object.

    Train a maximum entropy classifier on a training set.
    training_set is a list of observations.  results is a list of the
    class assignments for each observation.  feature_fns is a list of
    the features.  These are callback functions that take an
    observation and class and return a 1 or 0.  update_fn is a
    callback function that is called at each training iteration.  It is
    passed a MaxEntropy object that encapsulates the current state of
    the training.

    The maximum number of iterations and the convergence criterion for IIS
    are given by max_iis_iterations and iis_converge, respectively, while
    max_newton_iterations and newton_converge are the maximum number
    of iterations and the convergence criterion for Newton's method.
    """
    if not training_set:
        raise ValueError("No data in the training set.")
    if len(training_set) != len(results):
        raise ValueError("training_set and results should be parallel lists.")

    # Rename variables for convenience.
    xs, ys = training_set, results

    # Get a list of all the classes that need to be trained.
    classes = sorted(set(results))

    # Cache values for all features.
    features = [_eval_feature_fn(fn, training_set, classes)
                for fn in feature_fns]
    # Cache values for f#.
    f_sharp = _calc_f_sharp(len(training_set), len(classes), features)

    # Pre-calculate the empirical expectations of the features.
    e_empirical = _calc_empirical_expects(xs, ys, classes, features)

    # Now train the alpha parameters to weight each feature.
    alphas = [0.0] * len(features)
    iters = 0
    while iters < max_iis_iterations:
        nalphas = _train_iis(xs, classes, features, f_sharp,
                             alphas, e_empirical,
                             max_newton_iterations, newton_converge)
        diff = map(lambda x, y: numpy.fabs(x - y), alphas, nalphas)
        diff = reduce(lambda x, y: x + y, diff, 0)
        alphas = nalphas

        me = MaxEntropy()
        me.alphas, me.classes, me.feature_fns = alphas, classes, feature_fns
        if update_fn is not None:
            update_fn(me)

        if diff < iis_converge:   # converged
            break
        iters += 1   # count this pass so max_iis_iterations is actually enforced
    else:
        raise RuntimeError("IIS did not converge")

    return me

if __name__ == "__main__":
    # Car data from the Naive Bayes Classifier example by Eric Meisner, November 22, 2003
    # http://www.inf.u-szeged.hu/~ormandi/teaching/mi2/02-naiveBayes-example.pdf

    xcar = [
        ['Red', 'Sports', 'Domestic'],
        ['Red', 'Sports', 'Domestic'],
        ['Red', 'Sports', 'Domestic'],
        ['Yellow', 'Sports', 'Domestic'],
        ['Yellow', 'Sports', 'Imported'],
        ['Yellow', 'SUV', 'Imported'],
        ['Yellow', 'SUV', 'Imported'],
        ['Yellow', 'SUV', 'Domestic'],
        ['Red', 'SUV', 'Imported'],
        ['Red', 'Sports', 'Imported']
    ]

    ycar = [
        'Yes',
        'No',
        'Yes',
        'No',
        'Yes',
        'No',
        'Yes',
        'No',
        'No',
        'Yes'
    ]

    # Requires some rules or features
    def udf1(ts, cl):
        if ts[0] == 'Red':
            return 0
        else:
            return 1

    def udf2(ts, cl):
        if ts[1] == 'Sports':
            return 0
        else:
            return 1

    def udf3(ts, cl):
        if ts[2] == 'Domestic':
            return 0
        else:
            return 1

    user_functions = [udf1, udf2, udf3]  # must be an iterable type

    xe = train(xcar, ycar, user_functions)
    for xv, yv in zip(xcar, ycar):
        xc = classify(xe, xv)
        print('Pred: %s gives %s y is %s' % (xv, xc, yv))