Package Bio :: Module NaiveBayes

Source Code for Module Bio.NaiveBayes

# Copyright 2000 by Jeffrey Chang.  All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license.  Please see the LICENSE file that should have been included
# as part of this package.

"""This provides code for a general Naive Bayes learner.

Naive Bayes is a supervised classification algorithm that uses Bayes
rule to compute the fit between a new observation and some previously
observed data.  The observations are discrete feature vectors, with
the Bayes assumption that the features are independent.  Although this
is hardly ever true, the classifier works well enough in practice.

Glossary:
    - observation - A feature vector of discrete data.
    - class       - A possible classification for an observation.

Classes:
    - NaiveBayes - Holds information for a naive Bayes classifier.

Functions:
    - train     - Train a new naive Bayes classifier.
    - calculate - Calculate the probabilities of each class, given an observation.
    - classify  - Classify an observation into a class.

"""

from __future__ import print_function

import numpy
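
For reference, the posterior this module estimates is (in the same ASCII
notation the comments below use):

    P(class|observation) = P(observation|class) * P(class) / P(observation)

with the naive independence assumption factorizing the likelihood across
feature dimensions:

    P(observation|class) = PRODUCT_j P(observation_j|class)

``train`` estimates the terms on the right-hand side by counting, and
``calculate`` evaluates them in log space for numerical stability.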
def _contents(items):
    """Return a dictionary mapping each distinct item to its relative frequency."""
    term = 1.0 / len(items)
    counts = {}
    for item in items:
        counts[item] = counts.get(item, 0) + term
    return counts
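
A quick sanity check of the helper (doctest-style, not part of the
original source; the dict ordering shown assumes Python 3.7+, where
dicts preserve insertion order):

    >>> _contents(["a", "a", "b", "c"])
    {'a': 0.5, 'b': 0.25, 'c': 0.25}

Each occurrence contributes ``1/len(items)``, so the values always sum
to 1.0 up to floating-point rounding.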
class NaiveBayes(object):
    """Holds information for a NaiveBayes classifier.

    Attributes:
     - classes - List of the possible classes of data.
     - p_conditional - CLASS x DIM array of dicts of value -> ``P(value|class,dim)``
     - p_prior - List of the prior probabilities for every class.
     - dimensionality - Dimensionality of the data.

    """

    def __init__(self):
        self.classes = []
        self.p_conditional = None
        self.p_prior = []
        self.dimensionality = None
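
Concretely (illustrative values only, not taken from the original
documentation), a classifier trained on two classes over a single
dimension might hold:

    nb.classes         # ['apple', 'orange']
    nb.p_prior         # array([0.5, 0.5])
    nb.p_conditional   # [[{'red': 0.75, 'green': 0.25}],
                       #  [{'red': 0.25, 'green': 0.75}]]
    nb.dimensionality  # 1

so ``nb.p_conditional[i][j][value]`` is ``P(value|class i, dimension j)``.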
def calculate(nb, observation, scale=False):
    """Calculate ``log P(class|observation)`` for each class.

    - nb - A NaiveBayes classifier that has been trained.
    - observation - A list representing the observed data.
    - scale - Boolean to indicate whether the probability should be
      scaled by ``P(observation)``.  By default, no scaling is done.

    Returns:
    A dictionary mapping each class to the log probability of that
    class given the observation.
    """
    # P(class|observation) = P(observation|class)*P(class)/P(observation)
    # Taking the log:
    # lP(class|observation) = lP(observation|class)+lP(class)-lP(observation)

    # Make sure the observation has the right dimensionality.
    if len(observation) != nb.dimensionality:
        raise ValueError("observation in {0} dimensions, but classifier in {1}".format(
            len(observation), nb.dimensionality))

    # Calculate log P(observation|class) for every class.
    n = len(nb.classes)
    lp_observation_class = numpy.zeros(n)  # array of log P(observation|class)
    for i in range(n):
        # log P(observation|class) = SUM_j log P(observation_j|class)
        probs = [None] * len(observation)
        for j in range(len(observation)):
            probs[j] = nb.p_conditional[i][j].get(observation[j], 0)
        # Clip to avoid log(0) for values never seen in training.
        lprobs = numpy.log(numpy.clip(probs, 1.e-300, 1.e+300))
        lp_observation_class[i] = sum(lprobs)

    # Calculate log P(class).
    lp_prior = numpy.log(nb.p_prior)

    # Calculate log P(observation).
    lp_observation = 0.0  # P(observation)
    if scale:  # Only calculate this if requested.
        # log P(observation) = log SUM_i P(observation|class_i)P(class_i)
        obs = numpy.exp(numpy.clip(lp_prior + lp_observation_class, -700, +700))
        lp_observation = numpy.log(sum(obs))

    # Calculate log P(class|observation).
    lp_class_observation = {}  # Dict of class : log P(class|observation)
    for i in range(len(nb.classes)):
        lp_class_observation[nb.classes[i]] = \
            lp_observation_class[i] + lp_prior[i] - lp_observation

    return lp_class_observation
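
Usage sketch (assuming a trained classifier ``nb`` and a matching
``observation``, as in the example at the end of this listing): with
``scale=True`` the returned values are proper log posteriors, so
exponentiating them sums to one, up to the clipping applied above; the
default ``scale=False`` only omits the constant ``log P(observation)``,
which never changes which class is largest.

    log_posteriors = calculate(nb, observation, scale=True)
    print(sum(numpy.exp(lp) for lp in log_posteriors.values()))  # ~1.0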
def classify(nb, observation):
    """Classify an observation into a class.

    ``classify(nb, observation) -> class``

    """
    # The class is the one with the highest probability.
    probs = calculate(nb, observation, scale=False)
    max_prob = max_class = None
    for klass in nb.classes:
        if max_prob is None or probs[klass] > max_prob:
            max_prob, max_class = probs[klass], klass
    return max_class
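
``classify`` is just an argmax over the log posteriors; ties go to
whichever class comes first in ``nb.classes``.  An equivalent one-liner
(illustrative, assuming ``nb`` and ``observation`` are defined):

    probs = calculate(nb, observation)
    best = max(nb.classes, key=probs.get)  # same winner classify() returns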
def train(training_set, results, priors=None, typecode=None):
    """Train a naive Bayes classifier on a training set.

    ``train(training_set, results[, priors]) -> NaiveBayes``

    - training_set - List of observations.
    - results - List of the class assignments for each observation.
      Thus, training_set and results must be the same length.
    - priors - Optional dictionary specifying the prior probabilities
      for each type of result.  If not specified, the priors will be
      estimated from the training results.

    """
    if not len(training_set):
        raise ValueError("No data in the training set.")
    if len(training_set) != len(results):
        raise ValueError("training_set and results should be parallel lists.")

    # If no typecode is specified, try to pick a reasonable one.  If
    # training_set is a Numeric array, then use that typecode.
    # Otherwise, choose a reasonable default.
    # XXX NOT IMPLEMENTED

    # Check to make sure each vector in the training set has the same
    # dimensionality.
    dimensions = [len(x) for x in training_set]
    if min(dimensions) != max(dimensions):
        raise ValueError("observations have different dimensionality")

    nb = NaiveBayes()
    nb.dimensionality = dimensions[0]

    # Get a list of all the classes, and
    # estimate the prior probabilities for the classes.
    if priors is not None:
        percs = priors
        nb.classes = list(set(results))
    else:
        class_freq = _contents(results)
        nb.classes = list(class_freq.keys())
        percs = class_freq
    nb.classes.sort()  # keep it tidy

    nb.p_prior = numpy.zeros(len(nb.classes))
    for i in range(len(nb.classes)):
        nb.p_prior[i] = percs[nb.classes[i]]

    # Collect the observations in each class.  For each class, make a
    # matrix of training instances versus dimensions.  I might be able
    # to optimize this with Numeric, if the training_set parameter
    # were guaranteed to be a matrix.  However, this may not be the
    # case, because the client may be hacking up a sparse matrix or
    # something.
    c2i = {}  # class to index of class
    for index, key in enumerate(nb.classes):
        c2i[key] = index
    observations = [[] for c in nb.classes]  # separate observations by class
    for i in range(len(results)):
        klass, obs = results[i], training_set[i]
        observations[c2i[klass]].append(obs)
    # Now make the observations Numeric matrix.
    for i in range(len(observations)):
        # XXX typecode must be specified!
        observations[i] = numpy.asarray(observations[i], typecode)

    # Calculate P(value|class,dim) for every class.
    # This is a good loop to optimize.
    nb.p_conditional = []
    for i in range(len(nb.classes)):
        class_observations = observations[i]  # observations for this class
        nb.p_conditional.append([None] * nb.dimensionality)
        for j in range(nb.dimensionality):
            # Collect all the values in this dimension.
            values = class_observations[:, j]

            # Add pseudocounts here.  This needs to be parameterized.
            # values = list(values) + range(len(nb.classes))  # XXX add 1

            # Estimate P(value|class,dim)
            nb.p_conditional[i][j] = _contents(values)
    return nb
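
A minimal end-to-end sketch of the public API (the data and names below
are invented for illustration):

    observations = [
        ["sunny", "weak"],
        ["sunny", "strong"],
        ["rain", "weak"],
        ["rain", "strong"],
    ]
    labels = ["play", "stay", "play", "stay"]

    model = train(observations, labels)
    print(classify(model, ["sunny", "weak"]))               # -> 'play'
    print(calculate(model, ["sunny", "weak"], scale=True))  # log posteriors

Each observation is a discrete feature vector (here: weather, wind), and
``labels`` gives the class of each one.  Feature values never seen for a
class during training fall back to the ``1e-300`` floor applied in
``calculate``, rather than zeroing out the whole product.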