
Source Code for Module Bio.GenBank.Scanner

   1  # Copyright 2007-2010 by Peter Cock.  All rights reserved. 
   2  # Revisions copyright 2010 by Uri Laserson.  All rights reserved. 
   3  # This code is part of the Biopython distribution and governed by its 
   4  # license.  Please see the LICENSE file that should have been included 
   5  # as part of this package. 
   6  """Internal code for parsing GenBank and EMBL files (PRIVATE). 
   7   
   8  This code is NOT intended for direct use.  It provides a basic scanner 
   9  (for use with a event consumer such as Bio.GenBank._FeatureConsumer) 
  10  to parse a GenBank or EMBL file (with their shared INSDC feature table). 
  11   
  12  It is used by Bio.GenBank to parse GenBank files 
  13  It is also used by Bio.SeqIO to parse GenBank and EMBL files 
  14   
  15  Feature Table Documentation: 
  16  http://www.insdc.org/files/feature_table.html 
  17  http://www.ncbi.nlm.nih.gov/projects/collab/FT/index.html 
  18  ftp://ftp.ncbi.nih.gov/genbank/docs/ 
  19  """ 
  20  # 17-MAR-2009: added wgs, wgs_scafld for GenBank whole genome shotgun master records. 
  21  # These are GenBank files that summarize the content of a project, and provide lists of 
  22  # scaffold and contig files in the project. These will be in annotations['wgs'] and 
  23  # annotations['wgs_scafld']. These GenBank files do not have sequences. See 
  24  # http://groups.google.com/group/bionet.molbio.genbank/browse_thread/thread/51fb88bf39e7dc36 
  25  # http://is.gd/nNgk 
  26  # for more details of this format, and an example. 
  27  # Added by Ying Huang & Iddo Friedberg 
  28   
  29  from __future__ import print_function 
  30   
  31  import warnings 
  32  import re 
  33  from collections import defaultdict 
  34  from Bio.Seq import Seq 
  35  from Bio.SeqRecord import SeqRecord 
  36  from Bio.Alphabet import generic_protein 
  37  from Bio import BiopythonParserWarning 
  38   
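As the module docstring says, this scanner is internal; the supported entry point is Bio.SeqIO (or Bio.GenBank). A minimal usage sketch, assuming a local GenBank flat file "example.gbk" (a placeholder name):

    from Bio import SeqIO

    # Bio.SeqIO drives GenBankScanner (defined below) behind the scenes.
    for record in SeqIO.parse("example.gbk", "genbank"):
        print("%s (%i bp) %s" % (record.id, len(record), record.description))
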
  39   
  40  class InsdcScanner(object):
  41      """Basic functions for breaking up a GenBank/EMBL file into sub sections.
  42  
  43      The International Nucleotide Sequence Database Collaboration (INSDC) is the
  44      collaboration between the DDBJ, EMBL, and GenBank. These organisations all
  45      use the same "Feature Table" layout in their plain text flat file formats.
  46  
  47      However, the header and sequence sections of an EMBL file are very
  48      different in layout to those produced by GenBank/DDBJ."""
  49  
  50      # These constants get redefined with sensible values in the sub classes:
  51      RECORD_START = "XXX"  # "LOCUS " or "ID "
  52      HEADER_WIDTH = 3  # 12 or 5
  53      FEATURE_START_MARKERS = ["XXX***FEATURES***XXX"]
  54      FEATURE_END_MARKERS = ["XXX***END FEATURES***XXX"]
  55      FEATURE_QUALIFIER_INDENT = 0
  56      FEATURE_QUALIFIER_SPACER = ""
  57      SEQUENCE_HEADERS = ["XXX"]  # with right hand side spaces removed
  58  
59 - def __init__(self, debug=0):
60 assert len(self.RECORD_START) == self.HEADER_WIDTH 61 for marker in self.SEQUENCE_HEADERS: 62 assert marker == marker.rstrip() 63 assert len(self.FEATURE_QUALIFIER_SPACER) == self.FEATURE_QUALIFIER_INDENT 64 self.debug = debug 65 self.line = None
66
67 - def set_handle(self, handle):
68 self.handle = handle 69 self.line = ""
70
  71      def find_start(self):
  72          """Read in lines until the ID/LOCUS line is found, which is returned.
  73  
  74          Any preamble (such as the header used by the NCBI on ``*.seq.gz`` archives)
  75          will be ignored."""
  76          while True:
  77              if self.line:
  78                  line = self.line
  79                  self.line = ""
  80              else:
  81                  line = self.handle.readline()
  82              if not line:
  83                  if self.debug:
  84                      print("End of file")
  85                  return None
  86              if line[:self.HEADER_WIDTH] == self.RECORD_START:
  87                  if self.debug > 1:
  88                      print("Found the start of a record:\n" + line)
  89                  break
  90              line = line.rstrip()
  91              if line == "//":
  92                  if self.debug > 1:
  93                      print("Skipping // marking end of last record")
  94              elif line == "":
  95                  if self.debug > 1:
  96                      print("Skipping blank line before record")
  97              else:
  98                  # Ignore any header before the first ID/LOCUS line.
  99                  if self.debug > 1:
 100                      print("Skipping header line before record:\n" + line)
 101          self.line = line
 102          return line
103
104 - def parse_header(self):
105 """Return list of strings making up the header 106 107 New line characters are removed. 108 109 Assumes you have just read in the ID/LOCUS line. 110 """ 111 assert self.line[:self.HEADER_WIDTH] == self.RECORD_START, \ 112 "Not at start of record" 113 114 header_lines = [] 115 while True: 116 line = self.handle.readline() 117 if not line: 118 raise ValueError("Premature end of line during sequence data") 119 line = line.rstrip() 120 if line in self.FEATURE_START_MARKERS: 121 if self.debug: 122 print("Found feature table") 123 break 124 # if line[:self.HEADER_WIDTH]==self.FEATURE_START_MARKER[:self.HEADER_WIDTH]: 125 # if self.debug : print("Found header table (?)") 126 # break 127 if line[:self.HEADER_WIDTH].rstrip() in self.SEQUENCE_HEADERS: 128 if self.debug: 129 print("Found start of sequence") 130 break 131 if line == "//": 132 raise ValueError("Premature end of sequence data marker '//' found") 133 header_lines.append(line) 134 self.line = line 135 return header_lines
136
137 - def parse_features(self, skip=False):
138 """Return list of tuples for the features (if present) 139 140 Each feature is returned as a tuple (key, location, qualifiers) 141 where key and location are strings (e.g. "CDS" and 142 "complement(join(490883..490885,1..879))") while qualifiers 143 is a list of two string tuples (feature qualifier keys and values). 144 145 Assumes you have already read to the start of the features table. 146 """ 147 if self.line.rstrip() not in self.FEATURE_START_MARKERS: 148 if self.debug: 149 print("Didn't find any feature table") 150 return [] 151 152 while self.line.rstrip() in self.FEATURE_START_MARKERS: 153 self.line = self.handle.readline() 154 155 features = [] 156 line = self.line 157 while True: 158 if not line: 159 raise ValueError("Premature end of line during features table") 160 if line[:self.HEADER_WIDTH].rstrip() in self.SEQUENCE_HEADERS: 161 if self.debug: 162 print("Found start of sequence") 163 break 164 line = line.rstrip() 165 if line == "//": 166 raise ValueError("Premature end of features table, marker '//' found") 167 if line in self.FEATURE_END_MARKERS: 168 if self.debug: 169 print("Found end of features") 170 line = self.handle.readline() 171 break 172 if line[2:self.FEATURE_QUALIFIER_INDENT].strip() == "": 173 # This is an empty feature line between qualifiers. Empty 174 # feature lines within qualifiers are handled below (ignored). 175 line = self.handle.readline() 176 continue 177 if len(line) < self.FEATURE_QUALIFIER_INDENT: 178 warnings.warn("line too short to contain a feature: %r" % line, 179 BiopythonParserWarning) 180 line = self.handle.readline() 181 continue 182 183 if skip: 184 line = self.handle.readline() 185 while line[:self.FEATURE_QUALIFIER_INDENT] == self.FEATURE_QUALIFIER_SPACER: 186 line = self.handle.readline() 187 else: 188 # Build up a list of the lines making up this feature: 189 if line[self.FEATURE_QUALIFIER_INDENT] != " " \ 190 and " " in line[self.FEATURE_QUALIFIER_INDENT:]: 191 # The feature table design enforces a length limit on the feature keys. 192 # Some third party files (e.g. IGMT's EMBL like files) solve this by 193 # over indenting the location and qualifiers. 194 feature_key, line = line[2:].strip().split(None, 1) 195 feature_lines = [line] 196 warnings.warn("Overindented %s feature?" % feature_key, 197 BiopythonParserWarning) 198 else: 199 feature_key = line[2:self.FEATURE_QUALIFIER_INDENT].strip() 200 feature_lines = [line[self.FEATURE_QUALIFIER_INDENT:]] 201 line = self.handle.readline() 202 while line[:self.FEATURE_QUALIFIER_INDENT] == self.FEATURE_QUALIFIER_SPACER \ 203 or (line != '' and line.rstrip() == ""): # cope with blank lines in the midst of a feature 204 # Use strip to remove any harmless trailing white space AND and leading 205 # white space (e.g. out of spec files with too much indentation) 206 feature_lines.append(line[self.FEATURE_QUALIFIER_INDENT:].strip()) 207 line = self.handle.readline() 208 features.append(self.parse_feature(feature_key, feature_lines)) 209 self.line = line 210 return features
211
212 - def parse_feature(self, feature_key, lines):
213 r"""Expects a feature as a list of strings, returns a tuple (key, location, qualifiers) 214 215 For example given this GenBank feature:: 216 217 CDS complement(join(490883..490885,1..879)) 218 /locus_tag="NEQ001" 219 /note="conserved hypothetical [Methanococcus jannaschii]; 220 COG1583:Uncharacterized ACR; IPR001472:Bipartite nuclear 221 localization signal; IPR002743: Protein of unknown 222 function DUF57" 223 /codon_start=1 224 /transl_table=11 225 /product="hypothetical protein" 226 /protein_id="NP_963295.1" 227 /db_xref="GI:41614797" 228 /db_xref="GeneID:2732620" 229 /translation="MRLLLELKALNSIDKKQLSNYLIQGFIYNILKNTEYSWLHNWKK 230 EKYFNFTLIPKKDIIENKRYYLIISSPDKRFIEVLHNKIKDLDIITIGLAQFQLRKTK 231 KFDPKLRFPWVTITPIVLREGKIVILKGDKYYKVFVKRLEELKKYNLIKKKEPILEEP 232 IEISLNQIKDGWKIIDVKDRYYDFRNKSFSAFSNWLRDLKEQSLRKYNNFCGKNFYFE 233 EAIFEGFTFYKTVSIRIRINRGEAVYIGTLWKELNVYRKLDKEEREFYKFLYDCGLGS 234 LNSMGFGFVNTKKNSAR" 235 236 Then should give input key="CDS" and the rest of the data as a list of strings 237 lines=["complement(join(490883..490885,1..879))", ..., "LNSMGFGFVNTKKNSAR"] 238 where the leading spaces and trailing newlines have been removed. 239 240 Returns tuple containing: (key as string, location string, qualifiers as list) 241 as follows for this example: 242 243 key = "CDS", string 244 location = "complement(join(490883..490885,1..879))", string 245 qualifiers = list of string tuples: 246 247 [('locus_tag', '"NEQ001"'), 248 ('note', '"conserved hypothetical [Methanococcus jannaschii];\nCOG1583:..."'), 249 ('codon_start', '1'), 250 ('transl_table', '11'), 251 ('product', '"hypothetical protein"'), 252 ('protein_id', '"NP_963295.1"'), 253 ('db_xref', '"GI:41614797"'), 254 ('db_xref', '"GeneID:2732620"'), 255 ('translation', '"MRLLLELKALNSIDKKQLSNYLIQGFIYNILKNTEYSWLHNWKK\nEKYFNFT..."')] 256 257 In the above example, the "note" and "translation" were edited for compactness, 258 and they would contain multiple new line characters (displayed above as \n) 259 260 If a qualifier is quoted (in this case, everything except codon_start and 261 transl_table) then the quotes are NOT removed. 262 263 Note that no whitespace is removed. 264 """ 265 # Skip any blank lines 266 iterator = (x for x in lines if x) 267 try: 268 line = next(iterator) 269 270 feature_location = line.strip() 271 while feature_location[-1:] == ",": 272 # Multiline location, still more to come! 273 line = next(iterator) 274 feature_location += line.strip() 275 if feature_location.count("(") > feature_location.count(")"): 276 # Including the prev line in warning would be more explicit, 277 # but this way get one-and-only-one warning shown by default: 278 warnings.warn("Non-standard feature line wrapping (didn't break on comma)?", 279 BiopythonParserWarning) 280 while feature_location[-1:] == "," or feature_location.count("(") > feature_location.count(")"): 281 line = next(iterator) 282 feature_location += line.strip() 283 284 qualifiers = [] 285 286 for line_number, line in enumerate(iterator): 287 # check for extra wrapping of the location closing parentheses 288 if line_number == 0 and line.startswith(")"): 289 feature_location += line.strip() 290 elif line[0] == "/": 291 # New qualifier 292 i = line.find("=") 293 key = line[1:i] # does not work if i==-1 294 value = line[i + 1:] # we ignore 'value' if i==-1 295 if i == -1: 296 # Qualifier with no key, e.g. 
/pseudo 297 key = line[1:] 298 qualifiers.append((key, None)) 299 elif not value: 300 # ApE can output /note= 301 qualifiers.append((key, "")) 302 elif value == '"': 303 # One single quote 304 if self.debug: 305 print("Single quote %s:%s" % (key, value)) 306 # DO NOT remove the quote... 307 qualifiers.append((key, value)) 308 elif value[0] == '"': 309 # Quoted... 310 value_list = [value] 311 while value_list[-1][-1] != '"': 312 value_list.append(next(iterator)) 313 value = '\n'.join(value_list) 314 # DO NOT remove the quotes... 315 qualifiers.append((key, value)) 316 else: 317 # Unquoted 318 # if debug : print("Unquoted line %s:%s" % (key,value)) 319 qualifiers.append((key, value)) 320 else: 321 # Unquoted continuation 322 assert len(qualifiers) > 0 323 assert key == qualifiers[-1][0] 324 # if debug : print("Unquoted Cont %s:%s" % (key, line)) 325 if qualifiers[-1][1] is None: 326 raise StopIteration 327 qualifiers[-1] = (key, qualifiers[-1][1] + "\n" + line) 328 return (feature_key, feature_location, qualifiers) 329 except StopIteration: 330 # Bummer 331 raise ValueError("Problem with '%s' feature:\n%s" 332 % (feature_key, "\n".join(lines)))
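As an illustration (not part of the module source), this sketch shows roughly what parse_feature() consumes and returns, using a toy feature modelled on the AXL2 gene in the example record at the end of this file:

    from Bio.GenBank.Scanner import GenBankScanner

    scanner = GenBankScanner()
    # The scanner normally builds the ``lines`` list itself while reading the
    # feature table; here one pre-split feature is handed in directly.
    key, location, qualifiers = scanner.parse_feature("gene", ["687..3158", '/gene="AXL2"'])
    # key == "gene"; location == "687..3158"; qualifiers == [("gene", '"AXL2"')]
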
333 356
 357      def _feed_first_line(self, consumer, line):
 358          """Handle the LOCUS/ID line, passing data to the consumer.
 359  
 360          This should be implemented by the EMBL / GenBank specific subclass.
 361  
 362          Used by the parse_records() and parse() methods.
 363          """
 364          pass
365
 366      def _feed_header_lines(self, consumer, lines):
 367          """Handle the header lines (list of strings), passing data to the consumer.
 368  
 369          This should be implemented by the EMBL / GenBank specific subclass.
 370  
 371          Used by the parse_records() and parse() methods.
 372          """
 373          pass
374
 375      def _feed_feature_table(self, consumer, feature_tuples):
 376          """Handle the feature table (list of tuples), passing data to the consumer.
 377  
 378          Used by the parse_records() and parse() methods.
 379          """
 380          consumer.start_feature_table()
 381          for feature_key, location_string, qualifiers in feature_tuples:
 382              consumer.feature_key(feature_key)
 383              consumer.location(location_string)
 384              for q_key, q_value in qualifiers:
 385                  if q_value is None:
 386                      consumer.feature_qualifier(q_key, q_value)
 387                  else:
 388                      consumer.feature_qualifier(q_key, q_value.replace("\n", " "))
389
390 - def _feed_misc_lines(self, consumer, lines):
391 """Handle any lines between features and sequence (list of strings), passing data to the consumer 392 393 This should be implemented by the EMBL / GenBank specific subclass 394 395 Used by the parse_records() and parse() methods. 396 """ 397 pass
398
399 - def feed(self, handle, consumer, do_features=True):
400 """Feed a set of data into the consumer. 401 402 This method is intended for use with the "old" code in Bio.GenBank 403 404 Arguments: 405 406 - handle - A handle with the information to parse. 407 - consumer - The consumer that should be informed of events. 408 - do_features - Boolean, should the features be parsed? 409 Skipping the features can be much faster. 410 411 Return values: 412 413 - true - Passed a record 414 - false - Did not find a record 415 """ 416 # Should work with both EMBL and GenBank files provided the 417 # equivalent Bio.GenBank._FeatureConsumer methods are called... 418 self.set_handle(handle) 419 if not self.find_start(): 420 # Could not find (another) record 421 consumer.data = None 422 return False 423 424 # We use the above class methods to parse the file into a simplified format. 425 # The first line, header lines and any misc lines after the features will be 426 # dealt with by GenBank / EMBL specific derived classes. 427 428 # First line and header: 429 self._feed_first_line(consumer, self.line) 430 self._feed_header_lines(consumer, self.parse_header()) 431 432 # Features (common to both EMBL and GenBank): 433 if do_features: 434 self._feed_feature_table(consumer, self.parse_features(skip=False)) 435 else: 436 self.parse_features(skip=True) # ignore the data 437 438 # Footer and sequence 439 misc_lines, sequence_string = self.parse_footer() 440 self._feed_misc_lines(consumer, misc_lines) 441 442 consumer.sequence(sequence_string) 443 # Calls to consumer.base_number() do nothing anyway 444 consumer.record_end("//") 445 446 assert self.line == "//" 447 448 # And we are done 449 return True
450
451 - def parse(self, handle, do_features=True):
452 """Returns a SeqRecord (with SeqFeatures if do_features=True) 453 454 See also the method parse_records() for use on multi-record files. 455 """ 456 from Bio.GenBank import _FeatureConsumer 457 from Bio.GenBank.utils import FeatureValueCleaner 458 459 consumer = _FeatureConsumer(use_fuzziness=1, 460 feature_cleaner=FeatureValueCleaner()) 461 462 if self.feed(handle, consumer, do_features): 463 return consumer.data 464 else: 465 return None
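A minimal sketch of calling parse() directly; normally Bio.SeqIO does this for you, and "example.gbk" is just a placeholder path:

    from Bio.GenBank.Scanner import GenBankScanner

    with open("example.gbk") as handle:
        record = GenBankScanner(debug=0).parse(handle)
    if record is not None:
        print("%s has %i features" % (record.id, len(record.features)))
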
466
467 - def parse_records(self, handle, do_features=True):
468 """Returns a SeqRecord object iterator 469 470 Each record (from the ID/LOCUS line to the // line) becomes a SeqRecord 471 472 The SeqRecord objects include SeqFeatures if do_features=True 473 474 This method is intended for use in Bio.SeqIO 475 """ 476 # This is a generator function 477 while True: 478 record = self.parse(handle, do_features) 479 if record is None: 480 break 481 if record.id is None: 482 raise ValueError("Failed to parse the record's ID. Invalid ID line?") 483 if record.name == "<unknown name>": 484 raise ValueError("Failed to parse the record's name. Invalid ID line?") 485 if record.description == "<unknown description>": 486 raise ValueError("Failed to parse the record's description") 487 yield record
488
489 - def parse_cds_features(self, handle, 490 alphabet=generic_protein, 491 tags2id=('protein_id', 'locus_tag', 'product')):
492 """Returns SeqRecord object iterator 493 494 Each CDS feature becomes a SeqRecord. 495 496 - alphabet - Used for any sequence found in a translation field. 497 - tags2id - Tupple of three strings, the feature keys to use 498 for the record id, name and description, 499 500 This method is intended for use in Bio.SeqIO 501 """ 502 self.set_handle(handle) 503 while self.find_start(): 504 # Got an EMBL or GenBank record... 505 self.parse_header() # ignore header lines! 506 feature_tuples = self.parse_features() 507 # self.parse_footer() # ignore footer lines! 508 while True: 509 line = self.handle.readline() 510 if not line: 511 break 512 if line[:2] == "//": 513 break 514 self.line = line.rstrip() 515 516 # Now go though those features... 517 for key, location_string, qualifiers in feature_tuples: 518 if key == "CDS": 519 # Create SeqRecord 520 # ================ 521 # SeqRecord objects cannot be created with annotations, they 522 # must be added afterwards. So create an empty record and 523 # then populate it: 524 record = SeqRecord(seq=None) 525 annotations = record.annotations 526 527 # Should we add a location object to the annotations? 528 # I *think* that only makes sense for SeqFeatures with their 529 # sub features... 530 annotations['raw_location'] = location_string.replace(' ', '') 531 532 for (qualifier_name, qualifier_data) in qualifiers: 533 if qualifier_data is not None \ 534 and qualifier_data[0] == '"' and qualifier_data[-1] == '"': 535 # Remove quotes 536 qualifier_data = qualifier_data[1:-1] 537 # Append the data to the annotation qualifier... 538 if qualifier_name == "translation": 539 assert record.seq is None, "Multiple translations!" 540 record.seq = Seq(qualifier_data.replace("\n", ""), alphabet) 541 elif qualifier_name == "db_xref": 542 # its a list, possibly empty. Its safe to extend 543 record.dbxrefs.append(qualifier_data) 544 else: 545 if qualifier_data is not None: 546 qualifier_data = qualifier_data.replace("\n", " ").replace(" ", " ") 547 try: 548 annotations[qualifier_name] += " " + qualifier_data 549 except KeyError: 550 # Not an addition to existing data, its the first bit 551 annotations[qualifier_name] = qualifier_data 552 553 # Fill in the ID, Name, Description 554 # ================================= 555 try: 556 record.id = annotations[tags2id[0]] 557 except KeyError: 558 pass 559 try: 560 record.name = annotations[tags2id[1]] 561 except KeyError: 562 pass 563 try: 564 record.description = annotations[tags2id[2]] 565 except KeyError: 566 pass 567 568 yield record
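A sketch of pulling out just the CDS features as protein SeqRecords; again "example.gbk" is a placeholder path (Bio.SeqIO exposes this behaviour as the "genbank-cds" format):

    from Bio.GenBank.Scanner import GenBankScanner

    with open("example.gbk") as handle:
        for protein in GenBankScanner().parse_cds_features(handle):
            # Each CDS becomes its own SeqRecord, using the /translation
            # qualifier (if present) as the sequence.
            print("%s %s" % (protein.id, protein.description))
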
569 570
571 -class EmblScanner(InsdcScanner):
572 """For extracting chunks of information in EMBL files""" 573 574 RECORD_START = "ID " 575 HEADER_WIDTH = 5 576 FEATURE_START_MARKERS = ["FH Key Location/Qualifiers", "FH"] 577 FEATURE_END_MARKERS = ["XX"] # XX can also mark the end of many things! 578 FEATURE_QUALIFIER_INDENT = 21 579 FEATURE_QUALIFIER_SPACER = "FT" + " " * (FEATURE_QUALIFIER_INDENT - 2) 580 SEQUENCE_HEADERS = ["SQ", "CO"] # Remove trailing spaces 581 616
617 - def _feed_first_line(self, consumer, line):
618 assert line[:self.HEADER_WIDTH].rstrip() == "ID" 619 if line[self.HEADER_WIDTH:].count(";") == 6: 620 # Looks like the semi colon separated style introduced in 2006 621 self._feed_first_line_new(consumer, line) 622 elif line[self.HEADER_WIDTH:].count(";") == 3: 623 if line.rstrip().endswith(" SQ"): 624 # EMBL-bank patent data 625 self._feed_first_line_patents(consumer, line) 626 else: 627 # Looks like the pre 2006 style 628 self._feed_first_line_old(consumer, line) 629 else: 630 raise ValueError('Did not recognise the ID line layout:\n' + line)
631
632 - def _feed_first_line_patents(self, consumer, line):
633 # Either Non-Redundant Level 1 database records, 634 # ID <accession>; <molecule type>; <non-redundant level 1>; <cluster size L1> 635 # e.g. ID NRP_AX000635; PRT; NR1; 15 SQ 636 # 637 # Or, Non-Redundant Level 2 database records: 638 # ID <L2-accession>; <molecule type>; <non-redundant level 2>; <cluster size L2> 639 # e.g. ID NRP0000016E; PRT; NR2; 5 SQ 640 fields = line[self.HEADER_WIDTH:].rstrip()[:-3].split(";") 641 assert len(fields) == 4 642 consumer.locus(fields[0]) 643 consumer.residue_type(fields[1]) 644 consumer.data_file_division(fields[2])
645 # TODO - Record cluster size? 646
647 - def _feed_first_line_old(self, consumer, line):
648 # Expects an ID line in the style before 2006, e.g. 649 # ID SC10H5 standard; DNA; PRO; 4870 BP. 650 # ID BSUB9999 standard; circular DNA; PRO; 4214630 BP. 651 assert line[:self.HEADER_WIDTH].rstrip() == "ID" 652 fields = [line[self.HEADER_WIDTH:].split(None, 1)[0]] 653 fields.extend(line[self.HEADER_WIDTH:].split(None, 1)[1].split(";")) 654 fields = [entry.strip() for entry in fields] 655 """ 656 The tokens represent: 657 658 0. Primary accession number 659 (space sep) 660 1. ??? (e.g. standard) 661 (semi-colon) 662 2. Topology and/or Molecule type (e.g. 'circular DNA' or 'DNA') 663 3. Taxonomic division (e.g. 'PRO') 664 4. Sequence length (e.g. '4639675 BP.') 665 """ 666 consumer.locus(fields[0]) # Should we also call the accession consumer? 667 consumer.residue_type(fields[2]) 668 consumer.data_file_division(fields[3]) 669 self._feed_seq_length(consumer, fields[4])
670
671 - def _feed_first_line_new(self, consumer, line):
672 # Expects an ID line in the style introduced in 2006, e.g. 673 # ID X56734; SV 1; linear; mRNA; STD; PLN; 1859 BP. 674 # ID CD789012; SV 4; linear; genomic DNA; HTG; MAM; 500 BP. 675 assert line[:self.HEADER_WIDTH].rstrip() == "ID" 676 fields = [data.strip() for data in line[self.HEADER_WIDTH:].strip().split(";")] 677 assert len(fields) == 7 678 """ 679 The tokens represent: 680 681 0. Primary accession number 682 1. Sequence version number 683 2. Topology: 'circular' or 'linear' 684 3. Molecule type (e.g. 'genomic DNA') 685 4. Data class (e.g. 'STD') 686 5. Taxonomic division (e.g. 'PRO') 687 6. Sequence length (e.g. '4639675 BP.') 688 """ 689 690 consumer.locus(fields[0]) 691 692 # Call the accession consumer now, to make sure we record 693 # something as the record.id, in case there is no AC line 694 consumer.accession(fields[0]) 695 696 # TODO - How to deal with the version field? At the moment the consumer 697 # will try and use this for the ID which isn't ideal for EMBL files. 698 version_parts = fields[1].split() 699 if len(version_parts) == 2 \ 700 and version_parts[0] == "SV" \ 701 and version_parts[1].isdigit(): 702 consumer.version_suffix(version_parts[1]) 703 704 # Based on how the old GenBank parser worked, merge these two: 705 consumer.residue_type(" ".join(fields[2:4])) # TODO - Store as two fields? 706 707 # consumer.xxx(fields[4]) # TODO - What should we do with the data class? 708 709 consumer.data_file_division(fields[5]) 710 711 self._feed_seq_length(consumer, fields[6])
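A worked illustration of the 2006-style ID line split performed above, using the embl_example record from the test block at the end of this module:

    line = "ID   X56734; SV 1; linear; mRNA; STD; PLN; 1859 BP."
    fields = [part.strip() for part in line[5:].strip().split(";")]
    # fields == ['X56734', 'SV 1', 'linear', 'mRNA', 'STD', 'PLN', '1859 BP.']
    # i.e. accession, version, topology, molecule type, data class,
    # taxonomic division and sequence length, in that order.
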
712
713 - def _feed_seq_length(self, consumer, text):
714 length_parts = text.split() 715 assert len(length_parts) == 2, "Invalid sequence length string %r" % text 716 assert length_parts[1].upper() in ["BP", "BP.", "AA", "AA."] 717 consumer.size(length_parts[0])
718
719 - def _feed_header_lines(self, consumer, lines):
720 EMBL_INDENT = self.HEADER_WIDTH 721 EMBL_SPACER = " " * EMBL_INDENT 722 consumer_dict = { 723 'AC': 'accession', 724 'SV': 'version', # SV line removed in June 2006, now part of ID line 725 'DE': 'definition', 726 # 'RN' : 'reference_num', 727 # 'RC' : reference comment... TODO 728 # 'RP' : 'reference_bases', 729 # 'RX' : reference cross reference... DOI or Pubmed 730 'RG': 'consrtm', # optional consortium 731 # 'RA' : 'authors', 732 # 'RT' : 'title', 733 'RL': 'journal', 734 'OS': 'organism', 735 'OC': 'taxonomy', 736 # 'DR' : data reference 737 'CC': 'comment', 738 # 'XX' : splitter 739 } 740 # We have to handle the following specially: 741 # RX (depending on reference type...) 742 for line in lines: 743 line_type = line[:EMBL_INDENT].strip() 744 data = line[EMBL_INDENT:].strip() 745 if line_type == 'XX': 746 pass 747 elif line_type == 'RN': 748 # Reformat reference numbers for the GenBank based consumer 749 # e.g. '[1]' becomes '1' 750 if data[0] == "[" and data[-1] == "]": 751 data = data[1:-1] 752 consumer.reference_num(data) 753 elif line_type == 'RP': 754 # Reformat reference numbers for the GenBank based consumer 755 # e.g. '1-4639675' becomes '(bases 1 to 4639675)' 756 # and '160-550, 904-1055' becomes '(bases 160 to 550; 904 to 1055)' 757 # Note could be multi-line, and end with a comma 758 parts = [bases.replace("-", " to ").strip() for bases in data.split(",") if bases.strip()] 759 consumer.reference_bases("(bases %s)" % "; ".join(parts)) 760 elif line_type == 'RT': 761 # Remove the enclosing quotes and trailing semi colon. 762 # Note the title can be split over multiple lines. 763 if data.startswith('"'): 764 data = data[1:] 765 if data.endswith('";'): 766 data = data[:-2] 767 consumer.title(data) 768 elif line_type == 'RX': 769 # EMBL support three reference types at the moment: 770 # - PUBMED PUBMED bibliographic database (NLM) 771 # - DOI Digital Object Identifier (International DOI Foundation) 772 # - AGRICOLA US National Agriculture Library (NAL) of the US Department 773 # of Agriculture (USDA) 774 # 775 # Format: 776 # RX resource_identifier; identifier. 777 # 778 # e.g. 779 # RX DOI; 10.1016/0024-3205(83)90010-3. 780 # RX PUBMED; 264242. 781 # 782 # Currently our reference object only supports PUBMED and MEDLINE 783 # (as these were in GenBank files?). 784 key, value = data.split(";", 1) 785 if value.endswith("."): 786 value = value[:-1] 787 value = value.strip() 788 if key == "PUBMED": 789 consumer.pubmed_id(value) 790 # TODO - Handle other reference types (here and in BioSQL bindings) 791 elif line_type == 'CC': 792 # Have to pass a list of strings for this one (not just a string) 793 consumer.comment([data]) 794 elif line_type == 'DR': 795 # Database Cross-reference, format: 796 # DR database_identifier; primary_identifier; secondary_identifier. 797 # 798 # e.g. 799 # DR MGI; 98599; Tcrb-V4. 800 # 801 # TODO - How should we store any secondary identifier? 802 parts = data.rstrip(".").split(";") 803 # Turn it into "database_identifier:primary_identifier" to 804 # mimic the GenBank parser. e.g. "MGI:98599" 805 consumer.dblink("%s:%s" % (parts[0].strip(), 806 parts[1].strip())) 807 elif line_type == 'RA': 808 # Remove trailing ; at end of authors list 809 consumer.authors(data.rstrip(";")) 810 elif line_type == 'PR': 811 # In the EMBL patent files, this is a PR (PRiority) line which 812 # provides the earliest active priority within the family. 813 # The priority number comes first, followed by the priority date. 814 # 815 # e.g. 
816 # PR JP19990377484 16-DEC-1999 817 # 818 # However, in most EMBL files this is a PR (PRoject) line which 819 # gives the BioProject reference number. 820 # 821 # e.g. 822 # PR Project:PRJNA60715; 823 # 824 # In GenBank files this corresponds to the old PROJECT line 825 # which was later replaced with the DBLINK line. 826 if data.startswith("Project:"): 827 # Remove trailing ; at end of the project reference 828 consumer.project(data.rstrip(";")) 829 elif line_type == 'KW': 830 consumer.keywords(data.rstrip(";")) 831 elif line_type in consumer_dict: 832 # Its a semi-automatic entry! 833 getattr(consumer, consumer_dict[line_type])(data) 834 else: 835 if self.debug: 836 print("Ignoring EMBL header line:\n%s" % line)
837
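A worked illustration of the RP (reference positions) reformatting handled above:

    data = "160-550, 904-1055"
    parts = [bases.replace("-", " to ").strip()
             for bases in data.split(",") if bases.strip()]
    print("(bases %s)" % "; ".join(parts))  # -> (bases 160 to 550; 904 to 1055)
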
838 - def _feed_misc_lines(self, consumer, lines):
839 # TODO - Should we do something with the information on the SQ line(s)? 840 lines.append("") 841 line_iter = iter(lines) 842 try: 843 for line in line_iter: 844 if line.startswith("CO "): 845 line = line[5:].strip() 846 contig_location = line 847 while True: 848 line = next(line_iter) 849 if not line: 850 break 851 elif line.startswith("CO "): 852 # Don't need to preseve the whitespace here. 853 contig_location += line[5:].strip() 854 else: 855 raise ValueError('Expected CO (contig) continuation line, got:\n' + line) 856 consumer.contig_location(contig_location) 857 if line.startswith("SQ Sequence "): 858 # e.g. 859 # SQ Sequence 219 BP; 82 A; 48 C; 33 G; 45 T; 11 other; 860 # 861 # Or, EMBL-bank patent, e.g. 862 # SQ Sequence 465 AA; 3963407aa91d3a0d622fec679a4524e0; MD5; 863 self._feed_seq_length(consumer, line[14:].rstrip().rstrip(";").split(";", 1)[0]) 864 # TODO - Record the checksum etc? 865 return 866 except StopIteration: 867 raise ValueError("Problem in misc lines before sequence")
868 869
870 -class _ImgtScanner(EmblScanner):
871 """For extracting chunks of information in IMGT (EMBL like) files (PRIVATE). 872 873 IMGT files are like EMBL files but in order to allow longer feature types 874 the features should be indented by 25 characters not 21 characters. In 875 practice the IMGT flat files tend to use either 21 or 25 characters, so we 876 must cope with both. 877 878 This is private to encourage use of Bio.SeqIO rather than Bio.GenBank. 879 """ 880 881 FEATURE_START_MARKERS = ["FH Key Location/Qualifiers", 882 "FH Key Location/Qualifiers (from EMBL)", 883 "FH Key Location/Qualifiers", 884 "FH"] 885
886 - def parse_features(self, skip=False):
887 """Return list of tuples for the features (if present) 888 889 Each feature is returned as a tuple (key, location, qualifiers) 890 where key and location are strings (e.g. "CDS" and 891 "complement(join(490883..490885,1..879))") while qualifiers 892 is a list of two string tuples (feature qualifier keys and values). 893 894 Assumes you have already read to the start of the features table. 895 """ 896 if self.line.rstrip() not in self.FEATURE_START_MARKERS: 897 if self.debug: 898 print("Didn't find any feature table") 899 return [] 900 901 while self.line.rstrip() in self.FEATURE_START_MARKERS: 902 self.line = self.handle.readline() 903 904 bad_position_re = re.compile(r'([0-9]+)>{1}') 905 906 features = [] 907 line = self.line 908 while True: 909 if not line: 910 raise ValueError("Premature end of line during features table") 911 if line[:self.HEADER_WIDTH].rstrip() in self.SEQUENCE_HEADERS: 912 if self.debug: 913 print("Found start of sequence") 914 break 915 line = line.rstrip() 916 if line == "//": 917 raise ValueError("Premature end of features table, marker '//' found") 918 if line in self.FEATURE_END_MARKERS: 919 if self.debug: 920 print("Found end of features") 921 line = self.handle.readline() 922 break 923 if line[2:self.FEATURE_QUALIFIER_INDENT].strip() == "": 924 # This is an empty feature line between qualifiers. Empty 925 # feature lines within qualifiers are handled below (ignored). 926 line = self.handle.readline() 927 continue 928 929 if skip: 930 line = self.handle.readline() 931 while line[:self.FEATURE_QUALIFIER_INDENT] == self.FEATURE_QUALIFIER_SPACER: 932 line = self.handle.readline() 933 else: 934 assert line[:2] == "FT" 935 try: 936 feature_key, location_start = line[2:].strip().split() 937 except ValueError: 938 # e.g. "FT TRANSMEMBRANE-REGION2163..2240\n" 939 # Assume indent of 25 as per IMGT spec, with the location 940 # start in column 26 (one-based). 941 feature_key = line[2:25].strip() 942 location_start = line[25:].strip() 943 feature_lines = [location_start] 944 line = self.handle.readline() 945 while line[:self.FEATURE_QUALIFIER_INDENT] == self.FEATURE_QUALIFIER_SPACER \ 946 or line.rstrip() == "": # cope with blank lines in the midst of a feature 947 # Use strip to remove any harmless trailing white space AND and leading 948 # white space (copes with 21 or 26 indents and orther variants) 949 assert line[:2] == "FT" 950 feature_lines.append(line[self.FEATURE_QUALIFIER_INDENT:].strip()) 951 line = self.handle.readline() 952 feature_key, location, qualifiers = \ 953 self.parse_feature(feature_key, feature_lines) 954 # Try to handle known problems with IMGT locations here: 955 if ">" in location: 956 # Nasty hack for common IMGT bug, should be >123 not 123> 957 # in a location string. At least here the meaning is clear, 958 # and since it is so common I don't want to issue a warning 959 # warnings.warn("Feature location %s is invalid, " 960 # "moving greater than sign before position" 961 # % location, BiopythonParserWarning) 962 location = bad_position_re.sub(r'>\1', location) 963 features.append((feature_key, location, qualifiers)) 964 self.line = line 965 return features
966 967
968 -class GenBankScanner(InsdcScanner):
969 """For extracting chunks of information in GenBank files""" 970 971 RECORD_START = "LOCUS " 972 HEADER_WIDTH = 12 973 FEATURE_START_MARKERS = ["FEATURES Location/Qualifiers", "FEATURES"] 974 FEATURE_END_MARKERS = [] 975 FEATURE_QUALIFIER_INDENT = 21 976 FEATURE_QUALIFIER_SPACER = " " * FEATURE_QUALIFIER_INDENT 977 SEQUENCE_HEADERS = ["CONTIG", "ORIGIN", "BASE COUNT", "WGS"] # trailing spaces removed 978 1031
1032 - def _feed_first_line(self, consumer, line):
1033 """Scan over and parse GenBank LOCUS line (PRIVATE). 1034 1035 This must cope with several variants, primarily the old and new column 1036 based standards from GenBank. Additionally EnsEMBL produces GenBank 1037 files where the LOCUS line is space separated rather that following 1038 the column based layout. 1039 1040 We also try to cope with GenBank like files with partial LOCUS lines. 1041 """ 1042 ##################################### 1043 # LOCUS line # 1044 ##################################### 1045 GENBANK_INDENT = self.HEADER_WIDTH 1046 GENBANK_SPACER = " " * GENBANK_INDENT 1047 assert line[0:GENBANK_INDENT] == 'LOCUS ', \ 1048 'LOCUS line does not start correctly:\n' + line 1049 1050 # Have to break up the locus line, and handle the different bits of it. 1051 # There are at least two different versions of the locus line... 1052 if line[29:33] in [' bp ', ' aa ', ' rc '] and line[55:62] == ' ': 1053 # Old... note we insist on the 55:62 being empty to avoid trying 1054 # to parse space separated LOCUS lines from Ensembl etc, see below. 1055 # 1056 # Positions Contents 1057 # --------- -------- 1058 # 00:06 LOCUS 1059 # 06:12 spaces 1060 # 12:?? Locus name 1061 # ??:?? space 1062 # ??:29 Length of sequence, right-justified 1063 # 29:33 space, bp, space 1064 # 33:41 strand type 1065 # 41:42 space 1066 # 42:51 Blank (implies linear), linear or circular 1067 # 51:52 space 1068 # 52:55 The division code (e.g. BCT, VRL, INV) 1069 # 55:62 space 1070 # 62:73 Date, in the form dd-MMM-yyyy (e.g., 15-MAR-1991) 1071 # 1072 # assert line[29:33] in [' bp ', ' aa ',' rc '] , \ 1073 # 'LOCUS line does not contain size units at expected position:\n' + line 1074 assert line[41:42] == ' ', \ 1075 'LOCUS line does not contain space at position 42:\n' + line 1076 assert line[42:51].strip() in ['', 'linear', 'circular'], \ 1077 'LOCUS line does not contain valid entry (linear, circular, ...):\n' + line 1078 assert line[51:52] == ' ', \ 1079 'LOCUS line does not contain space at position 52:\n' + line 1080 # assert line[55:62] == ' ', \ 1081 # 'LOCUS line does not contain spaces from position 56 to 62:\n' + line 1082 if line[62:73].strip(): 1083 assert line[64:65] == '-', \ 1084 'LOCUS line does not contain - at position 65 in date:\n' + line 1085 assert line[68:69] == '-', \ 1086 'LOCUS line does not contain - at position 69 in date:\n' + line 1087 1088 name_and_length_str = line[GENBANK_INDENT:29] 1089 while ' ' in name_and_length_str: 1090 name_and_length_str = name_and_length_str.replace(' ', ' ') 1091 name_and_length = name_and_length_str.split(' ') 1092 assert len(name_and_length) <= 2, \ 1093 'Cannot parse the name and length in the LOCUS line:\n' + line 1094 assert len(name_and_length) != 1, \ 1095 'Name and length collide in the LOCUS line:\n' + line 1096 # Should be possible to split them based on position, if 1097 # a clear definition of the standard exists THAT AGREES with 1098 # existing files. 1099 consumer.locus(name_and_length[0]) 1100 consumer.size(name_and_length[1]) 1101 # consumer.residue_type(line[33:41].strip()) 1102 1103 if line[33:51].strip() == "" and line[29:33] == ' aa ': 1104 # Amino acids -> protein (even if there is no residue type given) 1105 # We want to use a protein alphabet in this case, rather than a 1106 # generic one. 
Not sure if this is the best way to achieve this, 1107 # but it works because the scanner checks for this: 1108 consumer.residue_type("PROTEIN") 1109 else: 1110 consumer.residue_type(line[33:51].strip()) 1111 1112 consumer.data_file_division(line[52:55]) 1113 if line[62:73].strip(): 1114 consumer.date(line[62:73]) 1115 elif line[40:44] in [' bp ', ' aa ', ' rc '] \ 1116 and line[54:64].strip() in ['', 'linear', 'circular']: 1117 # New... linear/circular/big blank test should avoid EnsEMBL style 1118 # LOCUS line being treated like a proper column based LOCUS line. 1119 # 1120 # Positions Contents 1121 # --------- -------- 1122 # 00:06 LOCUS 1123 # 06:12 spaces 1124 # 12:?? Locus name 1125 # ??:?? space 1126 # ??:40 Length of sequence, right-justified 1127 # 40:44 space, bp, space 1128 # 44:47 Blank, ss-, ds-, ms- 1129 # 47:54 Blank, DNA, RNA, tRNA, mRNA, uRNA, snRNA, cDNA 1130 # 54:55 space 1131 # 55:63 Blank (implies linear), linear or circular 1132 # 63:64 space 1133 # 64:67 The division code (e.g. BCT, VRL, INV) 1134 # 67:68 space 1135 # 68:79 Date, in the form dd-MMM-yyyy (e.g., 15-MAR-1991) 1136 # 1137 assert line[40:44] in [' bp ', ' aa ', ' rc '], \ 1138 'LOCUS line does not contain size units at expected position:\n' + line 1139 assert line[44:47] in [' ', 'ss-', 'ds-', 'ms-'], \ 1140 'LOCUS line does not have valid strand type (Single stranded, ...):\n' + line 1141 assert line[47:54].strip() == "" \ 1142 or 'DNA' in line[47:54].strip().upper() \ 1143 or 'RNA' in line[47:54].strip().upper(), \ 1144 'LOCUS line does not contain valid sequence type (DNA, RNA, ...):\n' + line 1145 assert line[54:55] == ' ', \ 1146 'LOCUS line does not contain space at position 55:\n' + line 1147 assert line[55:63].strip() in ['', 'linear', 'circular'], \ 1148 'LOCUS line does not contain valid entry (linear, circular, ...):\n' + line 1149 assert line[63:64] == ' ', \ 1150 'LOCUS line does not contain space at position 64:\n' + line 1151 assert line[67:68] == ' ', \ 1152 'LOCUS line does not contain space at position 68:\n' + line 1153 if line[68:79].strip(): 1154 assert line[70:71] == '-', \ 1155 'LOCUS line does not contain - at position 71 in date:\n' + line 1156 assert line[74:75] == '-', \ 1157 'LOCUS line does not contain - at position 75 in date:\n' + line 1158 1159 name_and_length_str = line[GENBANK_INDENT:40] 1160 while ' ' in name_and_length_str: 1161 name_and_length_str = name_and_length_str.replace(' ', ' ') 1162 name_and_length = name_and_length_str.split(' ') 1163 assert len(name_and_length) <= 2, \ 1164 'Cannot parse the name and length in the LOCUS line:\n' + line 1165 assert len(name_and_length) != 1, \ 1166 'Name and length collide in the LOCUS line:\n' + line 1167 # Should be possible to split them based on position, if 1168 # a clear definition of the stand exists THAT AGREES with 1169 # existing files. 1170 consumer.locus(name_and_length[0]) 1171 consumer.size(name_and_length[1]) 1172 1173 if line[44:54].strip() == "" and line[40:44] == ' aa ': 1174 # Amino acids -> protein (even if there is no residue type given) 1175 # We want to use a protein alphabet in this case, rather than a 1176 # generic one. 
Not sure if this is the best way to achieve this, 1177 # but it works because the scanner checks for this: 1178 consumer.residue_type(("PROTEIN " + line[54:63]).strip()) 1179 else: 1180 consumer.residue_type(line[44:63].strip()) 1181 1182 consumer.data_file_division(line[64:67]) 1183 if line[68:79].strip(): 1184 consumer.date(line[68:79]) 1185 elif line[GENBANK_INDENT:].strip().count(" ") == 0: 1186 # Truncated LOCUS line, as produced by some EMBOSS tools - see bug 1762 1187 # 1188 # e.g. 1189 # 1190 # "LOCUS U00096" 1191 # 1192 # rather than: 1193 # 1194 # "LOCUS U00096 4639675 bp DNA circular BCT" 1195 # 1196 # Positions Contents 1197 # --------- -------- 1198 # 00:06 LOCUS 1199 # 06:12 spaces 1200 # 12:?? Locus name 1201 if line[GENBANK_INDENT:].strip() != "": 1202 consumer.locus(line[GENBANK_INDENT:].strip()) 1203 else: 1204 # Must just have just "LOCUS ", is this even legitimate? 1205 # We should be able to continue parsing... we need real world testcases! 1206 warnings.warn("Minimal LOCUS line found - is this " 1207 "correct?\n:%r" % line, BiopythonParserWarning) 1208 elif len(line.split()) == 8 and line.split()[3] in ("aa", "bp") and \ 1209 line.split()[5] in ('linear', 'circular'): 1210 # Cope with invalidly spaced GenBank LOCUS lines like 1211 # LOCUS AB070938 6497 bp DNA linear BCT 11-OCT-2001 1212 splitline = line.split() 1213 consumer.locus(splitline[1]) 1214 consumer.size(splitline[2]) 1215 consumer.residue_type(splitline[4]) 1216 consumer.data_file_division(splitline[6]) 1217 consumer.date(splitline[7]) 1218 warnings.warn("Attempting to parse malformed locus line:\n%r\n" 1219 "Found locus %r size %r residue_type %r\n" 1220 "Some fields may be wrong." % (line, splitline[1], 1221 splitline[2], splitline[4]), BiopythonParserWarning) 1222 elif len(line.split()) == 7 and line.split()[3] in ["aa", "bp"]: 1223 # Cope with EnsEMBL genbank files which use space separation rather 1224 # than the expected column based layout. e.g. 1225 # LOCUS HG531_PATCH 1000000 bp DNA HTG 18-JUN-2011 1226 # LOCUS HG531_PATCH 759984 bp DNA HTG 18-JUN-2011 1227 # LOCUS HG506_HG1000_1_PATCH 814959 bp DNA HTG 18-JUN-2011 1228 # LOCUS HG506_HG1000_1_PATCH 1219964 bp DNA HTG 18-JUN-2011 1229 # Notice that the 'bp' can occur in the position expected by either 1230 # the old or the new fixed column standards (parsed above). 1231 splitline = line.split() 1232 consumer.locus(splitline[1]) 1233 consumer.size(splitline[2]) 1234 consumer.residue_type(splitline[4]) 1235 consumer.data_file_division(splitline[5]) 1236 consumer.date(splitline[6]) 1237 elif len(line.split()) >= 4 and line.split()[3] in ["aa", "bp"]: 1238 # Cope with EMBOSS seqret output where it seems the locus id can cause 1239 # the other fields to overflow. We just IGNORE the other fields! 1240 warnings.warn("Malformed LOCUS line found - is this " 1241 "correct?\n:%r" % line, BiopythonParserWarning) 1242 consumer.locus(line.split()[1]) 1243 consumer.size(line.split()[2]) 1244 elif len(line.split()) >= 4 and line.split()[-1] in ["aa", "bp"]: 1245 # Cope with pseudo-GenBank files like this: 1246 # "LOCUS RNA5 complete 1718 bp" 1247 # Treat everything between LOCUS and the size as the identifier. 1248 warnings.warn("Malformed LOCUS line found - is this " 1249 "correct?\n:%r" % line, BiopythonParserWarning) 1250 consumer.locus(line[5:].rsplit(None, 2)[0].strip()) 1251 consumer.size(line.split()[-2]) 1252 else: 1253 raise ValueError('Did not recognise the LOCUS line layout:\n' + line)
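A worked illustration of the space-separated fallback handled above, using one of the Ensembl-style LOCUS lines quoted in the comments (seven whitespace-separated tokens; the spacing here is illustrative only):

    line = "LOCUS       HG531_PATCH 1000000 bp DNA HTG 18-JUN-2011"
    splitline = line.split()
    # splitline[1] -> locus name, splitline[2] -> size, splitline[4] -> residue
    # type, splitline[5] -> division, splitline[6] -> date
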
1254
1255 - def _feed_header_lines(self, consumer, lines):
1256 # Following dictionary maps GenBank lines to the associated 1257 # consumer methods - the special cases like LOCUS where one 1258 # genbank line triggers several consumer calls have to be 1259 # handled individually. 1260 GENBANK_INDENT = self.HEADER_WIDTH 1261 GENBANK_SPACER = " " * GENBANK_INDENT 1262 STRUCTURED_COMMENT_START = "-START##" 1263 STRUCTURED_COMMENT_END = "-END##" 1264 STRUCTURED_COMMENT_DELIM = " :: " 1265 consumer_dict = { 1266 'DEFINITION': 'definition', 1267 'ACCESSION': 'accession', 1268 'NID': 'nid', 1269 'PID': 'pid', 1270 'DBSOURCE': 'db_source', 1271 'KEYWORDS': 'keywords', 1272 'SEGMENT': 'segment', 1273 'SOURCE': 'source', 1274 'AUTHORS': 'authors', 1275 'CONSRTM': 'consrtm', 1276 'PROJECT': 'project', 1277 'TITLE': 'title', 1278 'JOURNAL': 'journal', 1279 'MEDLINE': 'medline_id', 1280 'PUBMED': 'pubmed_id', 1281 'REMARK': 'remark'} 1282 # We have to handle the following specially: 1283 # ORIGIN (locus, size, residue_type, data_file_division and date) 1284 # COMMENT (comment) 1285 # VERSION (version and gi) 1286 # DBLINK (database links like projects, newlines important) 1287 # REFERENCE (eference_num and reference_bases) 1288 # ORGANISM (organism and taxonomy) 1289 lines = [_f for _f in lines if _f] 1290 lines.append("") # helps avoid getting StopIteration all the time 1291 line_iter = iter(lines) 1292 try: 1293 line = next(line_iter) 1294 while True: 1295 if not line: 1296 break 1297 line_type = line[:GENBANK_INDENT].strip() 1298 data = line[GENBANK_INDENT:].strip() 1299 1300 if line_type == 'VERSION': 1301 # Need to call consumer.version(), and maybe also consumer.gi() as well. 1302 # e.g. 1303 # VERSION AC007323.5 GI:6587720 1304 while ' ' in data: 1305 data = data.replace(' ', ' ') 1306 if ' GI:' not in data: 1307 consumer.version(data) 1308 else: 1309 if self.debug: 1310 print("Version [" + data.split(' GI:')[0] + "], gi [" + data.split(' GI:')[1] + "]") 1311 consumer.version(data.split(' GI:')[0]) 1312 consumer.gi(data.split(' GI:')[1]) 1313 # Read in the next line! 1314 line = next(line_iter) 1315 elif line_type == 'DBLINK': 1316 # Need to call consumer.dblink() for each line, e.g. 1317 # DBLINK Project: 57779 1318 # BioProject: PRJNA57779 1319 consumer.dblink(data.strip()) 1320 # Read in the next line, and see if its more of the DBLINK section: 1321 while True: 1322 line = next(line_iter) 1323 if line[:GENBANK_INDENT] == GENBANK_SPACER: 1324 # Add this continuation to the data string 1325 consumer.dblink(line[GENBANK_INDENT:].strip()) 1326 else: 1327 # End of the DBLINK, leave this text in the variable "line" 1328 break 1329 elif line_type == 'REFERENCE': 1330 if self.debug > 1: 1331 print("Found reference [" + data + "]") 1332 # Need to call consumer.reference_num() and consumer.reference_bases() 1333 # e.g. 1334 # REFERENCE 1 (bases 1 to 86436) 1335 # 1336 # Note that this can be multiline, see Bug 1968, e.g. 1337 # 1338 # REFERENCE 42 (bases 1517 to 1696; 3932 to 4112; 17880 to 17975; 21142 to 1339 # 28259) 1340 # 1341 # For such cases we will call the consumer once only. 
1342 data = data.strip() 1343 1344 # Read in the next line, and see if its more of the reference: 1345 while True: 1346 line = next(line_iter) 1347 if line[:GENBANK_INDENT] == GENBANK_SPACER: 1348 # Add this continuation to the data string 1349 data += " " + line[GENBANK_INDENT:] 1350 if self.debug > 1: 1351 print("Extended reference text [" + data + "]") 1352 else: 1353 # End of the reference, leave this text in the variable "line" 1354 break 1355 1356 # We now have all the reference line(s) stored in a string, data, 1357 # which we pass to the consumer 1358 while ' ' in data: 1359 data = data.replace(' ', ' ') 1360 if ' ' not in data: 1361 if self.debug > 2: 1362 print('Reference number \"' + data + '\"') 1363 consumer.reference_num(data) 1364 else: 1365 if self.debug > 2: 1366 print('Reference number \"' + data[:data.find(' ')] + '\", \"' + data[data.find(' ') + 1:] + '\"') 1367 consumer.reference_num(data[:data.find(' ')]) 1368 consumer.reference_bases(data[data.find(' ') + 1:]) 1369 elif line_type == 'ORGANISM': 1370 # Typically the first line is the organism, and subsequent lines 1371 # are the taxonomy lineage. However, given longer and longer 1372 # species names (as more and more strains and sub strains get 1373 # sequenced) the oragnism name can now get wrapped onto multiple 1374 # lines. The NCBI say we have to recognise the lineage line by 1375 # the presence of semi-colon delimited entries. In the long term, 1376 # they are considering adding a new keyword (e.g. LINEAGE). 1377 # See Bug 2591 for details. 1378 organism_data = data 1379 lineage_data = "" 1380 while True: 1381 line = next(line_iter) 1382 if line[0:GENBANK_INDENT] == GENBANK_SPACER: 1383 if lineage_data or ";" in line: 1384 lineage_data += " " + line[GENBANK_INDENT:] 1385 elif line[GENBANK_INDENT:].strip() == ".": 1386 # No lineage data, just . place holder 1387 pass 1388 else: 1389 organism_data += " " + line[GENBANK_INDENT:].strip() 1390 else: 1391 # End of organism and taxonomy 1392 break 1393 consumer.organism(organism_data) 1394 if lineage_data.strip() == "" and self.debug > 1: 1395 print("Taxonomy line(s) missing or blank") 1396 consumer.taxonomy(lineage_data.strip()) 1397 del organism_data, lineage_data 1398 elif line_type == 'COMMENT': 1399 # A COMMENT can either be plain text or tabular (Structured Comment), 1400 # or contain both. Multiline comments are common. The code calls 1401 # consumer.comment() once with a list where each entry 1402 # is a line. If there's a structured comment consumer.structured_comment() 1403 # is called with a dict of dicts where the secondary key/value pairs are 1404 # the same as those in the structured comment table. The primary key is 1405 # the title or header of the table (e.g. Assembly-Data, FluData). See 1406 # http://www.ncbi.nlm.nih.gov/genbank/structuredcomment 1407 # for more information on Structured Comments. 
1408 data = line[GENBANK_INDENT:] 1409 if self.debug > 1: 1410 print("Found comment") 1411 comment_list = [] 1412 structured_comment_dict = defaultdict(dict) 1413 structured_comment_key = '' 1414 1415 if STRUCTURED_COMMENT_START in data: 1416 structured_comment_key = re.search(r"([^#]+){0}$".format(STRUCTURED_COMMENT_START), data).group(1) 1417 if self.debug > 1: 1418 print("Found Structured Comment") 1419 else: 1420 comment_list.append(data) 1421 1422 while True: 1423 line = next(line_iter) 1424 data = line[GENBANK_INDENT:] 1425 if line[0:GENBANK_INDENT] == GENBANK_SPACER: 1426 if STRUCTURED_COMMENT_START in data: 1427 structured_comment_key = re.search(r"([^#]+){0}$".format(STRUCTURED_COMMENT_START), data).group(1) 1428 elif structured_comment_key is not None and STRUCTURED_COMMENT_DELIM in data: 1429 match = re.search(r"(.+?)\s*{0}\s*(.+)".format(STRUCTURED_COMMENT_DELIM), data) 1430 structured_comment_dict[structured_comment_key][match.group(1)] = match.group(2) 1431 if self.debug > 2: 1432 print("Structured Comment continuation [" + data + "]") 1433 elif STRUCTURED_COMMENT_END not in data: 1434 comment_list.append(data) 1435 if self.debug > 2: 1436 print("Comment continuation [" + data + "]") 1437 else: 1438 # End of the comment 1439 break 1440 if comment_list: 1441 consumer.comment(comment_list) 1442 if structured_comment_dict: 1443 consumer.structured_comment(structured_comment_dict) 1444 del comment_list, structured_comment_key, structured_comment_dict 1445 elif line_type in consumer_dict: 1446 # It's a semi-automatic entry! 1447 # Now, this may be a multi line entry... 1448 while True: 1449 line = next(line_iter) 1450 if line[0:GENBANK_INDENT] == GENBANK_SPACER: 1451 data += ' ' + line[GENBANK_INDENT:] 1452 else: 1453 # We now have all the data for this entry: 1454 getattr(consumer, consumer_dict[line_type])(data) 1455 # End of continuation - return to top of loop! 1456 break 1457 else: 1458 if self.debug: 1459 print("Ignoring GenBank header line:\n" % line) 1460 # Read in next line 1461 line = next(line_iter) 1462 except StopIteration: 1463 raise ValueError("Problem in header")
1464
1465 - def _feed_misc_lines(self, consumer, lines):
1466 # Deals with a few misc lines between the features and the sequence 1467 GENBANK_INDENT = self.HEADER_WIDTH 1468 GENBANK_SPACER = " " * GENBANK_INDENT 1469 lines.append("") 1470 line_iter = iter(lines) 1471 try: 1472 for line in line_iter: 1473 if line.startswith('BASE COUNT'): 1474 line = line[10:].strip() 1475 if line: 1476 if self.debug: 1477 print("base_count = " + line) 1478 consumer.base_count(line) 1479 if line.startswith('ORIGIN'): 1480 line = line[6:].strip() 1481 if line: 1482 if self.debug: 1483 print("origin_name = " + line) 1484 consumer.origin_name(line) 1485 if line.startswith('WGS '): 1486 line = line[3:].strip() 1487 consumer.wgs(line) 1488 if line.startswith('WGS_SCAFLD'): 1489 line = line[10:].strip() 1490 consumer.add_wgs_scafld(line) 1491 if line.startswith('CONTIG'): 1492 line = line[6:].strip() 1493 contig_location = line 1494 while True: 1495 line = next(line_iter) 1496 if not line: 1497 break 1498 elif line[:GENBANK_INDENT] == GENBANK_SPACER: 1499 # Don't need to preseve the whitespace here. 1500 contig_location += line[GENBANK_INDENT:].rstrip() 1501 elif line.startswith('ORIGIN'): 1502 # Strange, seen this in GenPept files via Entrez gbwithparts 1503 line = line[6:].strip() 1504 if line: 1505 consumer.origin_name(line) 1506 break 1507 else: 1508 raise ValueError('Expected CONTIG continuation line, got:\n' + line) 1509 consumer.contig_location(contig_location) 1510 return 1511 except StopIteration: 1512 raise ValueError("Problem in misc lines before sequence")

if __name__ == "__main__":
    from Bio._py3k import StringIO

    gbk_example = \
"""LOCUS       SCU49845     5028 bp    DNA             PLN       21-JUN-1999
DEFINITION  Saccharomyces cerevisiae TCP1-beta gene, partial cds, and Axl2p
            (AXL2) and Rev7p (REV7) genes, complete cds.
ACCESSION   U49845
VERSION     U49845.1  GI:1293613
KEYWORDS    .
SOURCE      Saccharomyces cerevisiae (baker's yeast)
  ORGANISM  Saccharomyces cerevisiae
            Eukaryota; Fungi; Ascomycota; Saccharomycotina; Saccharomycetes;
            Saccharomycetales; Saccharomycetaceae; Saccharomyces.
REFERENCE   1  (bases 1 to 5028)
  AUTHORS   Torpey,L.E., Gibbs,P.E., Nelson,J. and Lawrence,C.W.
  TITLE     Cloning and sequence of REV7, a gene whose function is required for
            DNA damage-induced mutagenesis in Saccharomyces cerevisiae
  JOURNAL   Yeast 10 (11), 1503-1509 (1994)
   PUBMED   7871890
REFERENCE   2  (bases 1 to 5028)
  AUTHORS   Roemer,T., Madden,K., Chang,J. and Snyder,M.
  TITLE     Selection of axial growth sites in yeast requires Axl2p, a novel
            plasma membrane glycoprotein
  JOURNAL   Genes Dev. 10 (7), 777-793 (1996)
   PUBMED   8846915
REFERENCE   3  (bases 1 to 5028)
  AUTHORS   Roemer,T.
  TITLE     Direct Submission
  JOURNAL   Submitted (22-FEB-1996) Terry Roemer, Biology, Yale University, New
            Haven, CT, USA
FEATURES             Location/Qualifiers
     source          1..5028
                     /organism="Saccharomyces cerevisiae"
                     /db_xref="taxon:4932"
                     /chromosome="IX"
                     /map="9"
     CDS             <1..206
                     /codon_start=3
                     /product="TCP1-beta"
                     /protein_id="AAA98665.1"
                     /db_xref="GI:1293614"
                     /translation="SSIYNGISTSGLDLNNGTIADMRQLGIVESYKLKRAVVSSASEA
                     AEVLLRVDNIIRARPRTANRQHM"
     gene            687..3158
                     /gene="AXL2"
     CDS             687..3158
                     /gene="AXL2"
                     /note="plasma membrane glycoprotein"
                     /codon_start=1
                     /function="required for axial budding pattern of S.
                     cerevisiae"
                     /product="Axl2p"
                     /protein_id="AAA98666.1"
                     /db_xref="GI:1293615"
                     /translation="MTQLQISLLLTATISLLHLVVATPYEAYPIGKQYPPVARVNESF
                     TFQISNDTYKSSVDKTAQITYNCFDLPSWLSFDSSSRTFSGEPSSDLLSDANTTLYFN
                     VILEGTDSADSTSLNNTYQFVVTNRPSISLSSDFNLLALLKNYGYTNGKNALKLDPNE
                     VFNVTFDRSMFTNEESIVSYYGRSQLYNAPLPNWLFFDSGELKFTGTAPVINSAIAPE
                     TSYSFVIIATDIEGFSAVEVEFELVIGAHQLTTSIQNSLIINVTDTGNVSYDLPLNYV
                     YLDDDPISSDKLGSINLLDAPDWVALDNATISGSVPDELLGKNSNPANFSVSIYDTYG
                     DVIYFNFEVVSTTDLFAISSLPNINATRGEWFSYYFLPSQFTDYVNTNVSLEFTNSSQ
                     DHDWVKFQSSNLTLAGEVPKNFDKLSLGLKANQGSQSQELYFNIIGMDSKITHSNHSA
                     NATSTRSSHHSTSTSSYTSSTYTAKISSTSAAATSSAPAALPAANKTSSHNKKAVAIA
                     CGVAIPLGVILVALICFLIFWRRRRENPDDENLPHAISGPDLNNPANKPNQENATPLN
                     NPFDDDASSYDDTSIARRLAALNTLKLDNHSATESDISSVDEKRDSLSGMNTYNDQFQ
                     SQSKEELLAKPPVQPPESPFFDPQNRSSSVYMDSEPAVNKSWRYTGNLSPVSDIVRDS
                     YGSQKTVDTEKLFDLEAPEKEKRTSRDVTMSSLDPWNSNISPSPVRKSVTPSPYNVTK
                     HRNRHLQNIQDSQSGKNGITPTTMSTSSSDDFVPVKDGENFCWVHSMEPDRRPSKKRL
                     VDFSNKSNVNVGQVKDIHGRIPEML"
     gene            complement(3300..4037)
                     /gene="REV7"
     CDS             complement(3300..4037)
                     /gene="REV7"
                     /codon_start=1
                     /product="Rev7p"
                     /protein_id="AAA98667.1"
                     /db_xref="GI:1293616"
                     /translation="MNRWVEKWLRVYLKCYINLILFYRNVYPPQSFDYTTYQSFNLPQ
                     FVPINRHPALIDYIEELILDVLSKLTHVYRFSICIINKKNDLCIEKYVLDFSELQHVD
                     KDDQIITETEVFDEFRSSLNSLIMHLEKLPKVNDDTITFEAVINAIELELGHKLDRNR
                     RVDSLEEKAEIERDSNWVKCQEDENLPDNNGFQPPKIKLTSLVGSDVGPLIIHQFSEK
                     LISGDDKILNGVYSQYEEGESIFGSLF"
ORIGIN
        1 gatcctccat atacaacggt atctccacct caggtttaga tctcaacaac ggaaccattg
       61 ccgacatgag acagttaggt atcgtcgaga gttacaagct aaaacgagca gtagtcagct
      121 ctgcatctga agccgctgaa gttctactaa gggtggataa catcatccgt gcaagaccaa
      181 gaaccgccaa tagacaacat atgtaacata tttaggatat acctcgaaaa taataaaccg
      241 ccacactgtc attattataa ttagaaacag aacgcaaaaa ttatccacta tataattcaa
      301 agacgcgaaa aaaaaagaac aacgcgtcat agaacttttg gcaattcgcg tcacaaataa
      361 attttggcaa cttatgtttc ctcttcgagc agtactcgag ccctgtctca agaatgtaat
      421 aatacccatc gtaggtatgg ttaaagatag catctccaca acctcaaagc tccttgccga
      481 gagtcgccct cctttgtcga gtaattttca cttttcatat gagaacttat tttcttattc
      541 tttactctca catcctgtag tgattgacac tgcaacagcc accatcacta gaagaacaga
      601 acaattactt aatagaaaaa ttatatcttc ctcgaaacga tttcctgctt ccaacatcta
      661 cgtatatcaa gaagcattca cttaccatga cacagcttca gatttcatta ttgctgacag
      721 ctactatatc actactccat ctagtagtgg ccacgcccta tgaggcatat cctatcggaa
      781 aacaataccc cccagtggca agagtcaatg aatcgtttac atttcaaatt tccaatgata
      841 cctataaatc gtctgtagac aagacagctc aaataacata caattgcttc gacttaccga
      901 gctggctttc gtttgactct agttctagaa cgttctcagg tgaaccttct tctgacttac
      961 tatctgatgc gaacaccacg ttgtatttca atgtaatact cgagggtacg gactctgccg
     1021 acagcacgtc tttgaacaat acataccaat ttgttgttac aaaccgtcca tccatctcgc
     1081 tatcgtcaga tttcaatcta ttggcgttgt taaaaaacta tggttatact aacggcaaaa
     1141 acgctctgaa actagatcct aatgaagtct tcaacgtgac ttttgaccgt tcaatgttca
     1201 ctaacgaaga atccattgtg tcgtattacg gacgttctca gttgtataat gcgccgttac
     1261 ccaattggct gttcttcgat tctggcgagt tgaagtttac tgggacggca ccggtgataa
     1321 actcggcgat tgctccagaa acaagctaca gttttgtcat catcgctaca gacattgaag
     1381 gattttctgc cgttgaggta gaattcgaat tagtcatcgg ggctcaccag ttaactacct
     1441 ctattcaaaa tagtttgata atcaacgtta ctgacacagg taacgtttca tatgacttac
     1501 ctctaaacta tgtttatctc gatgacgatc ctatttcttc tgataaattg ggttctataa
     1561 acttattgga tgctccagac tgggtggcat tagataatgc taccatttcc gggtctgtcc
     1621 cagatgaatt actcggtaag aactccaatc ctgccaattt ttctgtgtcc atttatgata
     1681 cttatggtga tgtgatttat ttcaacttcg aagttgtctc cacaacggat ttgtttgcca
     1741 ttagttctct tcccaatatt aacgctacaa ggggtgaatg gttctcctac tattttttgc
     1801 cttctcagtt tacagactac gtgaatacaa acgtttcatt agagtttact aattcaagcc
     1861 aagaccatga ctgggtgaaa ttccaatcat ctaatttaac attagctgga gaagtgccca
     1921 agaatttcga caagctttca ttaggtttga aagcgaacca aggttcacaa tctcaagagc
     1981 tatattttaa catcattggc atggattcaa agataactca ctcaaaccac agtgcgaatg
     2041 caacgtccac aagaagttct caccactcca cctcaacaag ttcttacaca tcttctactt
     2101 acactgcaaa aatttcttct acctccgctg ctgctacttc ttctgctcca gcagcgctgc
     2161 cagcagccaa taaaacttca tctcacaata aaaaagcagt agcaattgcg tgcggtgttg
     2221 ctatcccatt aggcgttatc ctagtagctc tcatttgctt cctaatattc tggagacgca
     2281 gaagggaaaa tccagacgat gaaaacttac cgcatgctat tagtggacct gatttgaata
     2341 atcctgcaaa taaaccaaat caagaaaacg ctacaccttt gaacaacccc tttgatgatg
     2401 atgcttcctc gtacgatgat acttcaatag caagaagatt ggctgctttg aacactttga
     2461 aattggataa ccactctgcc actgaatctg atatttccag cgtggatgaa aagagagatt
     2521 ctctatcagg tatgaataca tacaatgatc agttccaatc ccaaagtaaa gaagaattat
     2581 tagcaaaacc cccagtacag cctccagaga gcccgttctt tgacccacag aataggtctt
     2641 cttctgtgta tatggatagt gaaccagcag taaataaatc ctggcgatat actggcaacc
     2701 tgtcaccagt ctctgatatt gtcagagaca gttacggatc acaaaaaact gttgatacag
     2761 aaaaactttt cgatttagaa gcaccagaga aggaaaaacg tacgtcaagg gatgtcacta
     2821 tgtcttcact ggacccttgg aacagcaata ttagcccttc tcccgtaaga aaatcagtaa
     2881 caccatcacc atataacgta acgaagcatc gtaaccgcca cttacaaaat attcaagact
     2941 ctcaaagcgg taaaaacgga atcactccca caacaatgtc aacttcatct tctgacgatt
     3001 ttgttccggt taaagatggt gaaaattttt gctgggtcca tagcatggaa ccagacagaa
     3061 gaccaagtaa gaaaaggtta gtagattttt caaataagag taatgtcaat gttggtcaag
     3121 ttaaggacat tcacggacgc atcccagaaa tgctgtgatt atacgcaacg atattttgct
     3181 taattttatt ttcctgtttt attttttatt agtggtttac agatacccta tattttattt
     3241 agtttttata cttagagaca tttaatttta attccattct tcaaatttca tttttgcact
     3301 taaaacaaag atccaaaaat gctctcgccc tcttcatatt gagaatacac tccattcaaa
     3361 attttgtcgt caccgctgat taatttttca ctaaactgat gaataatcaa aggccccacg
     3421 tcagaaccga ctaaagaagt gagttttatt ttaggaggtt gaaaaccatt attgtctggt
     3481 aaattttcat cttcttgaca tttaacccag tttgaatccc tttcaatttc tgctttttcc
     3541 tccaaactat cgaccctcct gtttctgtcc aacttatgtc ctagttccaa ttcgatcgca
     3601 ttaataactg cttcaaatgt tattgtgtca tcgttgactt taggtaattt ctccaaatgc
     3661 ataatcaaac tatttaagga agatcggaat tcgtcgaaca cttcagtttc cgtaatgatc
     3721 tgatcgtctt tatccacatg ttgtaattca ctaaaatcta aaacgtattt ttcaatgcat
     3781 aaatcgttct ttttattaat aatgcagatg gaaaatctgt aaacgtgcgt taatttagaa
     3841 agaacatcca gtataagttc ttctatatag tcaattaaag caggatgcct attaatggga
     3901 acgaactgcg gcaagttgaa tgactggtaa gtagtgtagt cgaatgactg aggtgggtat
     3961 acatttctat aaaataaaat caaattaatg tagcatttta agtataccct cagccacttc
     4021 tctacccatc tattcataaa gctgacgcaa cgattactat tttttttttc ttcttggatc
     4081 tcagtcgtcg caaaaacgta taccttcttt ttccgacctt ttttttagct ttctggaaaa
     4141 gtttatatta gttaaacagg gtctagtctt agtgtgaaag ctagtggttt cgattgactg
     4201 atattaagaa agtggaaatt aaattagtag tgtagacgta tatgcatatg tatttctcgc
     4261 ctgtttatgt ttctacgtac ttttgattta tagcaagggg aaaagaaata catactattt
     4321 tttggtaaag gtgaaagcat aatgtaaaag ctagaataaa atggacgaaa taaagagagg
     4381 cttagttcat cttttttcca aaaagcaccc aatgataata actaaaatga aaaggatttg
     4441 ccatctgtca gcaacatcag ttgtgtgagc aataataaaa tcatcacctc cgttgccttt
     4501 agcgcgtttg tcgtttgtat cttccgtaat tttagtctta tcaatgggaa tcataaattt
     4561 tccaatgaat tagcaatttc gtccaattct ttttgagctt cttcatattt gctttggaat
     4621 tcttcgcact tcttttccca ttcatctctt tcttcttcca aagcaacgat ccttctaccc
     4681 atttgctcag agttcaaatc ggcctctttc agtttatcca ttgcttcctt cagtttggct
     4741 tcactgtctt ctagctgttg ttctagatcc tggtttttct tggtgtagtt ctcattatta
     4801 gatctcaagt tattggagtc ttcagccaat tgctttgtat cagacaattg actctctaac
     4861 ttctccactt cactgtcgag ttgctcgttt ttagcggaca aagatttaat ctcgttttct
     4921 ttttcagtgt tagattgctc taattctttg agctgttctc tcagctcctc atatttttct
     4981 tgccatgact cagattctaa ttttaagcta ttcaatttct ctttgatc
//"""

    # GenBank format protein (aka GenPept) file from:
    # http://www.molecularevolution.org/resources/fileformats/
    gbk_example2 = \
"""LOCUS       AAD51968     143 aa            linear   BCT 21-AUG-2001
DEFINITION  transcriptional regulator RovA [Yersinia enterocolitica].
ACCESSION   AAD51968
VERSION     AAD51968.1  GI:5805369
DBSOURCE    locus AF171097 accession AF171097.1
KEYWORDS    .
SOURCE      Yersinia enterocolitica
  ORGANISM  Yersinia enterocolitica
            Bacteria; Proteobacteria; Gammaproteobacteria; Enterobacteriales;
            Enterobacteriaceae; Yersinia.
REFERENCE   1  (residues 1 to 143)
  AUTHORS   Revell,P.A. and Miller,V.L.
  TITLE     A chromosomally encoded regulator is required for expression of the
            Yersinia enterocolitica inv gene and for virulence
  JOURNAL   Mol. Microbiol. 35 (3), 677-685 (2000)
  MEDLINE   20138369
   PUBMED   10672189
REFERENCE   2  (residues 1 to 143)
  AUTHORS   Revell,P.A. and Miller,V.L.
  TITLE     Direct Submission
  JOURNAL   Submitted (22-JUL-1999) Molecular Microbiology, Washington
            University School of Medicine, Campus Box 8230, 660 South Euclid,
            St. Louis, MO 63110, USA
COMMENT     Method: conceptual translation.
FEATURES             Location/Qualifiers
     source          1..143
                     /organism="Yersinia enterocolitica"
                     /mol_type="unassigned DNA"
                     /strain="JB580v"
                     /serotype="O:8"
                     /db_xref="taxon:630"
     Protein         1..143
                     /product="transcriptional regulator RovA"
                     /name="regulates inv expression"
     CDS             1..143
                     /gene="rovA"
                     /coded_by="AF171097.1:380..811"
                     /note="regulator of virulence"
                     /transl_table=11
ORIGIN
        1 mestlgsdla rlvrvwrali dhrlkplelt qthwvtlhni nrlppeqsqi qlakaigieq
       61 pslvrtldql eekglitrht candrrakri klteqsspii eqvdgvicst rkeilggisp
      121 deiellsgli dklerniiql qsk
//
"""

    embl_example = """ID   X56734; SV 1; linear; mRNA; STD; PLN; 1859 BP.
XX
AC   X56734; S46826;
XX
DT   12-SEP-1991 (Rel. 29, Created)
DT   25-NOV-2005 (Rel. 85, Last updated, Version 11)
XX
DE   Trifolium repens mRNA for non-cyanogenic beta-glucosidase
XX
KW   beta-glucosidase.
XX
OS   Trifolium repens (white clover)
OC   Eukaryota; Viridiplantae; Streptophyta; Embryophyta; Tracheophyta;
OC   Spermatophyta; Magnoliophyta; eudicotyledons; core eudicotyledons; rosids;
OC   eurosids I; Fabales; Fabaceae; Papilionoideae; Trifolieae; Trifolium.
XX
RN   [5]
RP   1-1859
RX   PUBMED; 1907511.
RA   Oxtoby E., Dunn M.A., Pancoro A., Hughes M.A.;
RT   "Nucleotide and derived amino acid sequence of the cyanogenic
RT   beta-glucosidase (linamarase) from white clover (Trifolium repens L.)";
RL   Plant Mol. Biol. 17(2):209-219(1991).
XX
RN   [6]
RP   1-1859
RA   Hughes M.A.;
RT   ;
RL   Submitted (19-NOV-1990) to the EMBL/GenBank/DDBJ databases.
RL   Hughes M.A., University of Newcastle Upon Tyne, Medical School, Newcastle
RL   Upon Tyne, NE2 4HH, UK
XX
FH   Key             Location/Qualifiers
FH
FT   source          1..1859
FT                   /organism="Trifolium repens"
FT                   /mol_type="mRNA"
FT                   /clone_lib="lambda gt10"
FT                   /clone="TRE361"
FT                   /tissue_type="leaves"
FT                   /db_xref="taxon:3899"
FT   CDS             14..1495
FT                   /product="beta-glucosidase"
FT                   /EC_number="3.2.1.21"
FT                   /note="non-cyanogenic"
FT                   /db_xref="GOA:P26204"
FT                   /db_xref="InterPro:IPR001360"
FT                   /db_xref="InterPro:IPR013781"
FT                   /db_xref="UniProtKB/Swiss-Prot:P26204"
FT                   /protein_id="CAA40058.1"
FT                   /translation="MDFIVAIFALFVISSFTITSTNAVEASTLLDIGNLSRSSFPRGFI
FT                   FGAGSSAYQFEGAVNEGGRGPSIWDTFTHKYPEKIRDGSNADITVDQYHRYKEDVGIMK
FT                   DQNMDSYRFSISWPRILPKGKLSGGINHEGIKYYNNLINELLANGIQPFVTLFHWDLPQ
FT                   VLEDEYGGFLNSGVINDFRDYTDLCFKEFGDRVRYWSTLNEPWVFSNSGYALGTNAPGR
FT                   CSASNVAKPGDSGTGPYIVTHNQILAHAEAVHVYKTKYQAYQKGKIGITLVSNWLMPLD
FT                   DNSIPDIKAAERSLDFQFGLFMEQLTTGDYSKSMRRIVKNRLPKFSKFESSLVNGSFDF
FT                   IGINYYSSSYISNAPSHGNAKPSYSTNPMTNISFEKHGIPLGPRAASIWIYVYPYMFIQ
FT                   EDFEIFCYILKINITILQFSITENGMNEFNDATLPVEEALLNTYRIDYYYRHLYYIRSA
FT                   IRAGSNVKGFYAWSFLDCNEWFAGFTVRFGLNFVD"
FT   mRNA            1..1859
FT                   /experiment="experimental evidence, no additional details
FT                   recorded"
XX
SQ   Sequence 1859 BP; 609 A; 314 C; 355 G; 581 T; 0 other;
     aaacaaacca aatatggatt ttattgtagc catatttgct ctgtttgtta ttagctcatt        60
     cacaattact tccacaaatg cagttgaagc ttctactctt cttgacatag gtaacctgag       120
     tcggagcagt tttcctcgtg gcttcatctt tggtgctgga tcttcagcat accaatttga       180
     aggtgcagta aacgaaggcg gtagaggacc aagtatttgg gataccttca cccataaata       240
     tccagaaaaa ataagggatg gaagcaatgc agacatcacg gttgaccaat atcaccgcta       300
     caaggaagat gttgggatta tgaaggatca aaatatggat tcgtatagat tctcaatctc       360
     ttggccaaga atactcccaa agggaaagtt gagcggaggc ataaatcacg aaggaatcaa       420
     atattacaac aaccttatca acgaactatt ggctaacggt atacaaccat ttgtaactct       480
     ttttcattgg gatcttcccc aagtcttaga agatgagtat ggtggtttct taaactccgg       540
     tgtaataaat gattttcgag actatacgga tctttgcttc aaggaatttg gagatagagt       600
     gaggtattgg agtactctaa atgagccatg ggtgtttagc aattctggat atgcactagg       660
     aacaaatgca ccaggtcgat gttcggcctc caacgtggcc aagcctggtg attctggaac       720
     aggaccttat atagttacac acaatcaaat tcttgctcat gcagaagctg tacatgtgta       780
     taagactaaa taccaggcat atcaaaaggg aaagataggc ataacgttgg tatctaactg       840
     gttaatgcca cttgatgata atagcatacc agatataaag gctgccgaga gatcacttga       900
     cttccaattt ggattgttta tggaacaatt aacaacagga gattattcta agagcatgcg       960
     gcgtatagtt aaaaaccgat tacctaagtt ctcaaaattc gaatcaagcc tagtgaatgg      1020
     ttcatttgat tttattggta taaactatta ctcttctagt tatattagca atgccccttc      1080
     acatggcaat gccaaaccca gttactcaac aaatcctatg accaatattt catttgaaaa      1140
     acatgggata cccttaggtc caagggctgc ttcaatttgg atatatgttt atccatatat      1200
     gtttatccaa gaggacttcg agatcttttg ttacatatta aaaataaata taacaatcct      1260
     gcaattttca atcactgaaa atggtatgaa tgaattcaac gatgcaacac ttccagtaga      1320
     agaagctctt ttgaatactt acagaattga ttactattac cgtcacttat actacattcg      1380
     ttctgcaatc agggctggct caaatgtgaa gggtttttac gcatggtcat ttttggactg      1440
     taatgaatgg tttgcaggct ttactgttcg ttttggatta aactttgtag attagaaaga      1500
     tggattaaaa aggtacccta agctttctgc ccaatggtac aagaactttc tcaaaagaaa      1560
     ctagctagta ttattaaaag aactttgtag tagattacag tacatcgttt gaagttgagt      1620
     tggtgcacct aattaaataa aagaggttac tcttaacata tttttaggcc attcgttgtg      1680
     aagttgttag gctgttattt ctattatact atgttgtagt aataagtgca ttgttgtacc      1740
     agaagctatg atcataacta taggttgatc cttcatgtat cagtttgatg ttgagaatac      1800
     tttgaattaa aagtcttttt ttattttttt aaaaaaaaaa aaaaaaaaaa aaaaaaaaa      1859
//
"""

    print("GenBank CDS Iteration")
    print("=====================")

    g = GenBankScanner()
    for record in g.parse_cds_features(StringIO(gbk_example)):
        print(record)

    g = GenBankScanner()
    for record in g.parse_cds_features(StringIO(gbk_example2),
                                       tags2id=('gene', 'locus_tag', 'product')):
        print(record)

    g = GenBankScanner()
    for record in g.parse_cds_features(StringIO(gbk_example + "\n" + gbk_example2),
                                       tags2id=('gene', 'locus_tag', 'product')):
        print(record)

    print("")
    print("GenBank Iteration")
    print("=================")
    g = GenBankScanner()
    for record in g.parse_records(StringIO(gbk_example), do_features=False):
        print("%s %s %s" % (record.id, record.name, record.description))
        print(record.seq)

    g = GenBankScanner()
    for record in g.parse_records(StringIO(gbk_example), do_features=True):
        print("%s %s %s" % (record.id, record.name, record.description))
        print(record.seq)

    g = GenBankScanner()
    for record in g.parse_records(StringIO(gbk_example2), do_features=False):
        print("%s %s %s" % (record.id, record.name, record.description))
        print(record.seq)

    g = GenBankScanner()
    for record in g.parse_records(StringIO(gbk_example2), do_features=True):
        print("%s %s %s" % (record.id, record.name, record.description))
        print(record.seq)

    print("")
    print("EMBL CDS Iteration")
    print("==================")

    e = EmblScanner()
    for record in e.parse_cds_features(StringIO(embl_example)):
        print(record)

    print("")
    print("EMBL Iteration")
    print("==============")
    e = EmblScanner()
    for record in e.parse_records(StringIO(embl_example), do_features=True):
        print("%s %s %s" % (record.id, record.name, record.description))
        print(record.seq)
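
# The block above is the module's self-test, driven entirely from in-memory
# StringIO examples.  As a rough sketch only (the filename below is
# hypothetical and not shipped with Biopython), the same scanner calls can be
# pointed at an ordinary file handle instead:
#
#     from Bio.GenBank.Scanner import GenBankScanner
#     with open("example.gbk") as handle:
#         for record in GenBankScanner().parse_records(handle, do_features=True):
#             print("%s %s %s" % (record.id, record.name, record.description))
#             print(record.seq)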