
Annotation of /Sprout/SimLoad.pl



Revision 1.8

1 : parrello 1.1 #!/usr/bin/perl -w
2 :    
3 :     use strict;
4 :     use CGI;
5 :     use Tracer;
6 :     use Genome;
7 : parrello 1.7 use DBKernel;
8 : parrello 1.1 use SimBlocks;
9 :     use File::Path;
10 : parrello 1.2 use BasicLocation;
11 : parrello 1.3 use Cwd;
12 : parrello 1.1
13 :     =head1 Similarity Block Loader
14 :    
15 :     This script loads the similarity block database from
16 : parrello 1.3 the input files. The load process involves two steps:
17 :     converting the input files into C<dtx> load files
18 :     (B<generate>), and loading the C<dtx> files into the
19 :     database (B<load>).
20 : parrello 1.1
21 : parrello 1.3 The script takes a single parameter: a directory name.
22 : parrello 1.7 The default directory name is C<"$FIG_Config::data/CloseStrainSets">.
23 :     The input files should be in subdirectories called
24 :     C<Blocks> under the subdirectories of the input directory.
25 :     The subdirectory names themselves are considered the name
26 :     of the close-strain set. So, for example
27 :    
28 :     Data/CloseStrainSets/Vibrio/Blocks
29 :    
30 :     would be presumed to contain the Vibrio strains.
31 :    
32 :     The output files will be produced in the similarity block
33 :     data directory C<$FIG_Config::simBlocksData>, which will be
34 :     created if it does not exist. The input directory and all its
35 : parrello 1.3 subdirectories will be processed for input files.
36 : parrello 1.1
37 :     In addition to the directory name, the following
38 :     command-line options are supported.
39 :    
40 :     =over 4
41 :    
42 :     =item trace
43 :    
44 :     Trace level for output messages. A higher number means more
45 : parrello 1.4 messages. The default is C<3>. Trace messages are sent to
46 : parrello 1.3 the file C<trace.log> in the B<$FIG_Config::tmp>
47 :     directory.
48 : parrello 1.1
49 : parrello 1.4 =item sql
50 :    
51 :     If specified, SQL activity will be traced at the specified
52 :     trace level.
53 :    
54 : parrello 1.1 =item load
55 :    
56 :     C<yes> to load the data into the database, else C<no>.
57 :     The default is C<yes>.
58 :    
59 :     =item generate
60 :    
61 :     C<yes> to generate output files from input files, else
62 :     C<no>. The default is C<yes>.
63 :    
64 : parrello 1.6 =item createDB
65 :    
66 :     If specified, the database will be dropped and
67 :     re-created before loading.
68 :    
69 : parrello 1.1 =back
70 :    
71 :     For example, the following command line will process the
72 : parrello 1.7 input files in the C</Users/fig/BlastData> directory tree
73 :     and run at a trace level of 3.
74 : parrello 1.1
75 : parrello 1.3 C<< SimLoad -trace=3 /Users/fig/BlastData >>
76 :    
77 :     The following command line converts the input files in
78 :     the default directory into load files but does not load
79 :     the database and runs at a trace level of 2.
80 : parrello 1.1
81 : parrello 1.3 C<< SimLoad -trace=2 -load=no >>
82 : parrello 1.1
83 :     =head2 Input Directory
84 :    
85 : parrello 1.7 The following files must exist in each C<Blocks> directory
86 :     under the input directory.
87 : parrello 1.1
88 :     =over 4
89 :    
90 : parrello 1.8 =item genome.tbl
91 : parrello 1.1
92 :     This is a tab-delimited file that contains the ID of each
93 :     genome followed by a description string.
94 :    
95 : parrello 1.8 =item block.tbl, intergenic_block.tbl
96 : parrello 1.1
97 :     These are tab-delimited files that associate a gene name
98 :     with each block. The C<intergenic_block.tbl> file is optional.
99 :    
100 : parrello 1.8 =item region.tbl, intergenic_region.tbl
101 : parrello 1.1
102 :     These are tab-delimited files that describe each region
103 :     of a block. The C<intergenic_region.tbl> file is optional.
104 :    
105 :     =back
106 :    
107 :     The format of each file is given below.
108 :    
109 : parrello 1.8 =head3 genome.tbl
110 : parrello 1.1
111 :     The Genome file is copied almost unmodified to the
112 :     load file for the B<Genome> entity. Each record
113 :     represents a single genome. It has the following
114 :     fields.
115 :    
116 :     =over 4
117 :    
118 :     =item genomeID
119 :    
120 :     The ID of the genome.
121 :    
122 :     =item description
123 :    
124 :     A text description of the genome (usually the species name with
125 :     a strain ID).
126 :    
127 : parrello 1.8 =item groupName
128 :    
129 :     The name of the group to which the genome belongs. The loader supplies this value from the close-strain set subdirectory name; it is not part of the input file.
130 :    
131 : parrello 1.1 =back
132 :    
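For reference, a record of this shape can be pulled apart with a plain tab
split. The record below is made up, and the loader itself parses the real
file with C<Tracer::ParseRecord>; this is only a sketch.

    # Minimal sketch: parse one hypothetical genome.tbl record in core Perl.
    use strict;
    use warnings;

    my $line = "83333.1\tEscherichia coli K-12";    # hypothetical two-column record
    chomp $line;
    my ($genomeID, $description) = split /\t/, $line;
    print "$genomeID => $description\n";
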
133 : parrello 1.8 =head3 block.tbl, intergenic_block.tbl
134 : parrello 1.1
135 :     These files produce most of the data found in the B<GroupBlock>
136 :     entity. Each record represents a single block. Blocks either
137 :     correspond to genes or to inter-genic regions. Both types
138 :     of blocks may appear in multiple locations in multiple
139 :     contigs. The files should be sorted by block ID.
140 :    
141 :     =over 4
142 :    
143 :     =item blockID
144 :    
145 :     The unique ID of the block. This ID is also used in the
146 :     C<region.tbl> file.
147 :    
148 :     =item blockName
149 :    
150 :     The name of the block. For a gene block, this is the gene
151 :     name. For an inter-genic block, it is a name computed
152 :     from the names of the genes that are commonly nearby.
153 :    
154 :     =back
155 :    
156 : parrello 1.8 =head3 region.tbl, intergenic_region.tbl
157 : parrello 1.1
158 :     These files describe the regions in each block. They are
159 :     used to derive the relationships between genomes and
160 :     contigs (B<ConsistsOf>), the contigs themselves
161 :     (B<Contig>), the relationships between contigs and regions (B<ContainsRegion>),
162 :     the relationships between blocks and regions (B<IncludesRegion>), and the derived
163 :     relationship between genomes and blocks
164 :     (B<HasInstanceOf>). The files must be sorted by block
165 :     ID, and each record in a file represents a single
166 :     region in a contig. Each region belongs to a
167 :     single block. Note that the C<region.tbl> file contains
168 :     the regions for the C<block.tbl> file, and the
169 :     C<intergenic_region.tbl> file contains the regions for
170 :     the C<intergenic_block.tbl> file. No mixing is allowed.
171 :    
172 :     =over 4
173 :    
174 :     =item regionPEG
175 :    
176 :     PEG ID for this region. If the region is in an
177 :     inter-genic block, this field will be composed of
178 :     the IDs for the neighboring genes.
179 :    
180 :     =item genomeID
181 :    
182 :     ID of the relevant genome.
183 :    
184 :     =item contigID
185 :    
186 :     ID of the contig containing this region. This is a standard contig
187 :     ID that does not include the genome ID. It will be converted to
188 :     a Sprout-style contig ID (which includes the genome data) before
189 :     it is written to the output files.
190 :    
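The conversion is the one performed later in this script: the genome ID and
the raw contig ID are simply joined with a colon. The IDs below are made up.

    # Sprout-style contig ID, as built later in this script.
    my $genomeID    = "83333.1";                  # hypothetical genome ID
    my $rawContigID = "contig0001";               # hypothetical contig ID from region.tbl
    my $contigID    = "$genomeID:$rawContigID";   # "83333.1:contig0001"
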
191 :     =item begin
192 :    
193 :     The start point of the region. For a forward region this is the
194 :     left endpoint; for a reverse region it is the right endpoint. It
195 :     is a 1-based offset (which is consistent with Sprout usage), and
196 :     the identified location is inside the region.
197 :    
198 :     =item end
199 :    
200 :     The end point of the region. For a forward region this is the
201 :     right endpoint; for a reverse region it is the left endpoint. It
202 :     is a 1-based offset (which is consistent with Sprout usage), and
203 :     the identified location is inside the region.
204 :    
205 :     =item blockID
206 :    
207 :     The ID of the block containing this region.
208 :    
209 :     =item snippet
210 :    
211 :     A DNA snippet representing the contents of the region. The region
212 :     may be shorter than the block length. If that is the case, the
213 :     snippet will contain insertion characters (C<->). So, while it
214 :     is not the case that every region in a block must be the same
215 :     length, all of the snippets for a block must be the same length.
216 :     The snippets will be in alignment form. In other words, if the
217 :     region is reversed, the nucleotide letters will be the complement
218 :     in transcription order. (For example, if positions 20 through 25
219 :     of contig B<XXX> are C<AGCCTT>, then the snippet for C<XXX_25_20>
220 :     will be C<AAGGCT>.)
221 :    
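The reverse-complement rule in that example can be checked with core Perl.
This is for illustration only; the loader does not compute it, since the
snippets arrive in the input files already in alignment form.

    # Reverse complement of the forward sequence from the example above.
    use strict;
    use warnings;

    my $forward = "AGCCTT";                       # positions 20 through 25 of contig XXX
    (my $snippet = reverse $forward) =~ tr/ACGTacgt/TGCAtgca/;
    print "$snippet\n";                           # prints AAGGCT, the snippet for XXX_25_20
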
222 :     =back
223 :    
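Because both C<block.tbl> and C<region.tbl> are sorted by block ID, the loader
walks the two files in parallel, consuming region records until the block ID
changes. The stand-alone sketch below shows that merge pattern with in-memory
arrays and made-up IDs; the real loop later in this script also handles
end-of-file, validation, and the C<intergenic_> variants.

    # Conceptual sketch of the parallel walk through block and region records.
    use strict;
    use warnings;

    my @blocks  = ( [1, 'thrA'], [2, 'thrB'] );               # blockID, blockName (made up)
    my @regions = ( [1, 'regA'], [1, 'regB'], [2, 'regC'] );  # blockID, regionPEG (made up)

    my $region = shift @regions;                  # prime with the first region record
    for my $block (@blocks) {
        my ($blockID, $blockName) = @$block;
        while ($region && $region->[0] == $blockID) {
            print "block $blockName gets region $region->[1]\n";
            $region = shift @regions;
        }
    }
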
224 :     =head2 Output File Notes
225 :    
226 :     =over 4
227 :    
228 :     =item Genome.dtx
229 :    
230 :     This file is a copy of the C<genome.tbl> file with the group name appended; we
231 :     also use it to create a hash of genome IDs (C<%genomes>). The hash
232 :     is useful when validating the C<region.tbl> file.
233 :    
234 :     =item Contig.dtx
235 :    
236 :     This file contains nothing but contig IDs. As contigs are
237 :     discovered from the C<region.tbl> file, their IDs are put
238 :     into the C<%contigs> hash. This hash maps contig IDs to
239 :     their parent genome IDs. When processing is complete,
240 :     this file is generated from the hash.
241 :    
242 :     =item GroupBlock.dtx
243 :    
244 :     This file describes the blocks. As records come in from
245 :     C<region.tbl>, we build a hash called C<%blockData> that
246 :     contains our latest estimate of all the C<GroupBlock.dtx>
247 :     columns for the current block (with the exception of
248 :     B<variance>, which is computed by dividing the B<snip-count>
249 :     by the length (B<len>)).
250 :    
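For example (hypothetical numbers), a 400-character pattern with 12 variant
positions yields a variance of 0.03.

    # Worked example of the variance calculation (hypothetical numbers).
    my $len       = 400;                  # block length (len)
    my $snipCount = 12;                   # number of variant positions (snip-count)
    my $variance  = $snipCount / $len;    # 12 / 400 = 0.03
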
251 :     =item ConsistsOf.dtx
252 :    
253 :     This file maps genomes to contigs, and is generated from
254 :     the C<%contigs> hash built while reading the C<region.tbl>
255 :     file.
256 :    
257 :     =item HasInstanceOf.dtx
258 :    
259 :     This file lists the genomes containing each block. The
260 :     C<region.tbl> file is sorted by block. While inside a
261 :     block's section of the file, we use a hash called
262 :     C<%genomesFound> that contains the ID of every genome
263 :     found for the block. When we finish with a block,
264 :     we run through the C<%genomesFound> hash to produce
265 :     the block's B<HasInstanceOf> data.
266 :    
267 :     =item Region.dtx
268 :    
269 :     This file describes the contig regions in the blocks.
270 :     As the C<region.tbl> file is read in, we build a
271 :     hash called C<%regionMap> that maps a region's
272 :     SEED-style location string to the DNA content.
273 :     When we finish with a block, the DNA content is
274 :     converted into an alignment by comparing it to
275 :     the block's pattern in C<%blockData>. (Essentially,
276 :     we only include the region's content for the
277 :     positions that vary between regions in the block.)
278 :     From this and the region string itself, we have
279 :     enough data to create the B<Region>
280 :     data.
281 :    
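To make the alignment step concrete, the sketch below finds the columns at
which a set of equal-length snippets differ and keeps only those columns. The
snippets are made up, and this is a rough stand-in, not the actual
C<SimBlocks::ParsePattern> and C<SimBlocks::MergeDNA> logic used by the code.

    # Rough illustration: keep only the columns that vary across the snippets.
    use strict;
    use warnings;

    my @snippets = ('AAGGCT', 'AAGACT', 'AAGGCA');   # hypothetical aligned snippets
    my @varying;
    for my $pos (0 .. length($snippets[0]) - 1) {
        my %seen = map { substr($_, $pos, 1) => 1 } @snippets;
        push @varying, $pos if keys(%seen) > 1;
    }
    for my $snip (@snippets) {
        my $content = join '', map { substr($snip, $_, 1) } @varying;
        print "$snip -> $content\n";                 # e.g. AAGGCT -> GT
    }
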
282 :     =item IncludesRegion.dtx
283 :    
284 :     This file maps group blocks to regions. The first column
285 :     is the block ID and the second column is the SEED-style
286 :     region string for the target region. This file is built
287 :     in parallel with C<Region.dtx>. It will have one record
288 :     for each region.
289 :    
290 :     =item ContainsRegion.dtx
291 :    
292 :     This file maps contigs to regions. The first column is
293 :     the contig ID and the second column is the SEED-style
294 :     location string for the region. It contains two redundant
295 :     columns used for sorting: the region length (column 3)
296 :     and the left-most region position (column 4). This
297 :     file is built in parallel with C<Region.dtx>. It will
298 :     have one record for each region.
299 :    
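As an illustration of why the two extra columns are carried, records can be
ordered on them directly; the rows below are made up.

    # Sketch: sort ContainsRegion.dtx-style rows by region length (column 3),
    # then by leftmost position (column 4).
    use strict;
    use warnings;

    my @rows = map { [ split /\t/ ] } (
        "83333.1:contig0001\t83333.1:contig0001_10_90\t81\t10",    # hypothetical rows
        "83333.1:contig0001\t83333.1:contig0001_5_30\t26\t5",
    );
    my @sorted = sort { $a->[2] <=> $b->[2] || $a->[3] <=> $b->[3] } @rows;
    print join("\t", @$_), "\n" for @sorted;
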
=back

300 :     =cut
301 :    
302 :     # Create a huge number we can use for an end-of-file
303 :     # indicator in the block ID.
304 :     my $TRAILER = 999999999;
305 :    
306 :     # Parse the command line.
307 : parrello 1.5 my ($options, @arguments) = StandardSetup(['SimBlocks'],
308 :     { load => ['yes', "'no' to suppress loading the database"],
309 : parrello 1.6 generate => ['yes', "'no' to suppress generating the load files"],
310 :     createDB => [0, 'drop and create the database before loading']
311 : parrello 1.5 },
312 :     "directoryName",
313 : parrello 1.1 @ARGV);
314 :     # Extract the directory name from the argument array.
315 : parrello 1.7 my $inDirectoryTree = ($arguments[0] ? Cwd::abs_path($arguments[0]) : "$FIG_Config::data/CloseStrainSets");
316 :     # Check to see if we need to create the database.
317 :     if ($options->{createDB}) {
318 :     Trace("Creating database.") if T(2);
319 :     DBKernel::CreateDB($FIG_Config::simBlocksDB);
320 : parrello 1.1 }
321 :     # Get the output directory.
322 :     my $outDirectory = $FIG_Config::simBlocksData;
323 :     # Insure that it exists.
324 :     if (! -d $outDirectory) {
325 : parrello 1.3 Trace("Creating output directory $outDirectory.") if T(2);
326 : parrello 1.1 mkpath($outDirectory);
327 : parrello 1.4 } elsif ($options->{generate} eq 'yes') {
328 :     # Here we have an output directory already and are going to generate new
329 :     # load files. Clear any leftover data from previous runs.
330 : parrello 1.3 my @files = grep { $_ =~ /\.dtx$/ } Tracer::OpenDir($outDirectory);
331 :     my $numFiles = @files;
332 :     if ($numFiles > 0) {
333 :     Trace("Deleting $numFiles old dtx files from $outDirectory.") if T(2);
334 :     unlink map { "$outDirectory/$_" } @files;
335 :     }
336 : parrello 1.1 }
337 : parrello 1.3 # Create an error counter and a directory counter.
338 : parrello 1.1 my $errorCount = 0;
339 : parrello 1.3 my $dirCount = 0;
340 : parrello 1.1 # Check to see if we should generate the output files.
341 :     if ($options->{generate} eq 'no') {
342 :     # Here we are to use existing output files.
343 : parrello 1.3 Trace("Existing database load files will be used.") if T(2);
344 : parrello 1.1 } else {
345 :     # Here we need to produce new output files.
346 :     # Verify that the input directory exists.
347 : parrello 1.3 if (! -d $inDirectoryTree) {
348 :     Confess("Input directory \"$inDirectoryTree\" not found.");
349 : parrello 1.1 }
350 : parrello 1.3 # Loop through the subdirectories.
351 : parrello 1.7 for my $inputDirectory (Tracer::OpenDir($inDirectoryTree, 1)) {
352 :     # Verify that this is a directory.
353 :     my $inDirectory = "$inDirectoryTree/$inputDirectory/Blocks";
354 :     if (-d $inDirectory) {
355 : parrello 1.3 # Here we have a directory to process. Check for a genome
356 :     # file.
357 : parrello 1.8 my $genomeFileName = "$inDirectory/genome.tbl";
358 : parrello 1.3 if (! -e $genomeFileName) {
359 :     Trace("$genomeFileName not found. Directory skipped.") if T(1);
360 :     } else {
361 :     # Now we can process the directory and accumulate the error
362 :     # count.
363 : parrello 1.7 $errorCount += ProcessDirectory($inDirectory, $outDirectory, $inputDirectory);
364 : parrello 1.3 $dirCount++;
365 :     }
366 :     }
367 :     }
368 :     Trace("Load files generated from $dirCount directories.") if T(2);
369 :     }
370 :     # Check for errors.
371 :     if ($errorCount > 0) {
372 :     Trace("$errorCount errors found in input files.") if T(0);
373 :     } else {
374 :     # No errors, so it's okay to load the database.
375 :     if ($options->{load} eq 'yes') {
376 :     # Here we have no outstanding errors and the user wants us to load
377 :     # the database. First, we create a similarity block object.
378 :     my $simBlocks = SimBlocks->new();
379 :     # Use it to load the database. Note we specify that the tables are to be
380 :     # dropped and rebuilt.
381 :     $simBlocks->LoadTables($outDirectory, 1);
382 :     Trace("Database loaded.") if T(2);
383 :     }
384 :     }
385 :    
386 :     # Process a single input directory.
387 :     sub ProcessDirectory {
388 : parrello 1.7 my ($inDirectory, $outDirectory, $groupName) = @_;
389 : parrello 1.3 Trace("Processing directory $inDirectory.") if T(2);
390 : parrello 1.1 # Our first task is to copy the genome data to the output directory
391 : parrello 1.7 # and add the genomes to the genome list.
392 : parrello 1.1 my %genomes = ();
393 : parrello 1.8 Open(\*GENOMESIN, "<$inDirectory/genome.tbl");
394 : parrello 1.3 Open(\*GENOMESOUT, ">>$outDirectory/Genome.dtx");
395 :     # Count the genomes read and errors found.
396 : parrello 1.1 my $genomeCount = 0;
397 : parrello 1.3 my $errorCount = 0;
398 : parrello 1.1 # Loop through the input.
399 :     while (my $genomeData = <GENOMESIN>) {
400 :     # Echo the genome record to the output.
401 : parrello 1.7 my @fields = Tracer::ParseRecord($genomeData);
402 :     print GENOMESOUT join("\t", @fields, $groupName). "\n";
403 : parrello 1.1 # Extract the genome ID.
404 : parrello 1.7 my $genomeID = $fields[0];
405 : parrello 1.1 # Store it in the genomes hash. We start with a value of 0. If
406 :     # contig information for the genome is found, we change the value
407 :     # to 1. When we're all done with the regions, we can check the
408 :     # hash to insure all the genomes were represented in the input.
409 :     $genomes{$genomeID} = 0;
410 :     # Count this genome.
411 :     $genomeCount++;
412 :     }
413 : parrello 1.3 Trace("$genomeCount genomes found.") if T(2);
414 : parrello 1.1 # Close the files.
415 :     close GENOMESIN;
416 :     close GENOMESOUT;
417 :     # Create the contig hash used to associate contigs to their parent
418 :     # genomes.
419 :     my %contigs = ();
420 :     # Now we begin to read the Block and Region files in parallel. Both
421 :     # are sorted by block ID, so all processing for this section of the
422 :     # script is done a block at a time. The first task is to
423 :     # open the output files.
424 : parrello 1.3 Open(\*BLOCKSOUT, ">>$outDirectory/GroupBlock.dtx");
425 :     Open(\*REGIONSOUT, ">>$outDirectory/Region.dtx");
426 :     Open(\*INSTANCESOUT, ">>$outDirectory/HasInstanceOf.dtx");
427 :     Open(\*CONTAINSOUT, ">>$outDirectory/ContainsRegion.dtx");
428 :     Open(\*INCLUDESOUT, ">>$outDirectory/IncludesRegion.dtx");
429 : parrello 1.1 # Determine which file sets we'll be processing.
430 :     my @fileSets = ();
431 : parrello 1.8 my @prefixes = ("", "intergenic_");
432 : parrello 1.1 for my $prefix (@prefixes) {
433 : parrello 1.8 if (-e "$inDirectory/${prefix}block.tbl") {
434 : parrello 1.1 push @fileSets, $prefix;
435 :     }
436 :     }
437 : parrello 1.3 # Set up the duplicate-region check.
438 :     my %allRegions = ();
439 : parrello 1.1 # Set up some counters.
440 :     my ($blocksCount, $regionsCount) = (0, 0);
441 :     # Loop through the useful file sets.
442 :     for my $fileSet (@fileSets) {
443 : parrello 1.8 Open(\*BLOCKSIN, "<$inDirectory/${fileSet}block.tbl");
444 :     Open(\*REGIONSIN, "<$inDirectory/${fileSet}region.tbl");
445 : parrello 1.3 Trace("Processing ${fileSet}Blocks.") if T(2);
446 : parrello 1.1 # The outer loop processes blocks. This is accomplished by reading
447 :     # through the block file. We prime the loop by reading the first
448 :     # region record. This is because we finish processing a block when
449 :     # the first record of the next block is found in the region file.
450 :     my %regionRecord = GetRegionRecord();
451 :     $regionsCount++;
452 :     while (my $blockRecord = <BLOCKSIN>) {
453 :     $blocksCount++;
454 :     # Parse the block record.
455 :     my ($blockID, $blockName, $pegID) = Tracer::ParseRecord($blockRecord);
456 :     # Create the block data for this block.
457 :     my %blockData = ( id => $blockID, description => $blockName );
458 :     # Initialize the tracking hashes. "genomesFound" tracks the
459 : parrello 1.2 # genomes whose contigs are represented by the block,
460 : parrello 1.1 # "regionMap" maps each region to its contents, and
461 :     # "regionPeg" maps each region to its PEG (if any).
462 :     my %genomesFound = ();
463 :     my %regionMap = ();
464 :     my %regionPeg = ();
465 :     # Count the number of regions found in this block.
466 :     my $regionCounter = 0;
467 :     # Loop through the regions in the block. Because of the way
468 :     # "GetRegionRecord" works, the "blockID" field will have an
469 :     # impossibly high value if we've hit end-of-file in the
470 :     # region input file.
471 :     while ($regionRecord{blockID} <= $blockID) {
472 :     # If this region's block ID is invalid, complain
473 :     # and skip it.
474 :     if ($regionRecord{blockID} != $blockID) {
475 : parrello 1.3 Trace("Block $regionRecord{blockID} in region record $regionsCount not found in block input file at record $blocksCount.") if T(0);
476 : parrello 1.1 $errorCount++;
477 :     } else {
478 :     # Here both files are in sync, which is good. The next step is
479 :     # to connect with the Genome and the Contig.
480 :     my $genomeID = $regionRecord{genomeID};
481 :     my $contigID = "$genomeID:$regionRecord{contigID}";
482 :     if (! exists $genomes{$genomeID}) {
483 : parrello 1.3 Trace("Genome $genomeID in region record $regionsCount not found in genome input file.") if T(0);
484 : parrello 1.1 $errorCount++;
485 :     } else {
486 :     # Denote this genome has an instance of this block.
487 :     $genomesFound{$genomeID} = 1;
488 :     # Denote this genome has occurred in the region file.
489 :     $genomes{$genomeID} = 1;
490 :     # Connect the contig to the genome.
491 :     $contigs{$contigID} = $genomeID;
492 :     # Now we need to process the snippet. First, we create a
493 :     # region string.
494 :     my $regionString = "${contigID}_$regionRecord{begin}_$regionRecord{end}";
495 :     # Next, we stuff the snippet and PEG in the region's hash entries.
496 :     my $snippet = $regionRecord{snippet};
497 :     $regionMap{$regionString} = $snippet;
498 :     $regionPeg{$regionString} = $regionRecord{peg};
499 :     # Check to see if this is the block's first snippet.
500 :     if (! exists $blockData{pattern}) {
501 :     # Here it is, so store the snippet as the pattern.
502 :     $blockData{pattern} = $snippet;
503 :     $blockData{"snip-count"} = 0;
504 :     $blockData{len} = length $snippet;
505 :     } elsif ($blockData{len} != length $snippet) {
506 :     # Here it is not the first, but the lengths do not match.
507 : parrello 1.3 Trace("Snippet for region record $regionsCount does not match block length $blockData{len}.") if T(0);
508 : parrello 1.1 $errorCount++;
509 :     } else {
510 :     # Here everything is legitimate, so we merge the new
511 :     # snippet into the pattern.
512 :     ($blockData{pattern}, $blockData{"snip-count"}) =
513 :     SimBlocks::MergeDNA($blockData{pattern}, $snippet);
514 :     }
515 :     }
516 :     # Count this region.
517 :     $regionCounter++;
518 :     }
519 :     # Get the next region record.
520 :     %regionRecord = GetRegionRecord();
521 :     }
522 :     # We have now processed all the regions in the block. Insure we found at least
523 :     # one.
524 :     if (! $regionCounter) {
525 : parrello 1.3 Trace("No regions found for block $blockID at $blocksCount in block input file.") if T(0);
526 : parrello 1.1 $errorCount++;
527 :     } else {
528 : parrello 1.4 Trace("$regionCounter regions found in block $blockID.") if T(4);
529 : parrello 1.7 # Write the block record. Note that the block ID is prefixed by the group name to
530 :     # make it unique.
531 : parrello 1.1 my $variance = $blockData{"snip-count"} / $blockData{len};
532 : parrello 1.7 print BLOCKSOUT join("\t", "$groupName:$blockID", $blockData{description}, $blockData{len},
533 : parrello 1.1 $blockData{"snip-count"}, $variance, $blockData{pattern}) . "\n";
534 :     # Find all the variance points in the block pattern. We'll use them to create
535 :     # the content strings for each region.
536 :     my @positions = SimBlocks::ParsePattern($blockData{pattern});
537 :     # Loop through the regions, writing them out to the region output file.
538 :     for my $region (keys %regionMap) {
539 : parrello 1.3 if (length($region) > 80) {
540 :     Trace("Invalid region key \"$region\".") if T(1);
541 :     }
542 : parrello 1.1 # Get the region's snips.
543 :     my $source = $regionMap{$region};
544 :     my $content = "";
545 :     for my $pos (@positions) {
546 :     $content .= substr $source, $pos, 1;
547 :     }
548 :     # Get the region's location data.
549 : parrello 1.2 my $location = BasicLocation->new($region);
550 : parrello 1.1 # Write this region to the output files.
551 :     print REGIONSOUT join("\t", $region, $location->Contig, $location->Dir,
552 : parrello 1.3 $location->Right, $location->Length,
553 :     $regionPeg{$region}, $location->Left, $content) . "\n";
554 : parrello 1.1 print CONTAINSOUT join("\t", $location->Contig, $region,
555 :     $location->Length, $location->Left) . "\n";
556 : parrello 1.7 print INCLUDESOUT join("\t", "$groupName:$blockID", $region) . "\n";
557 : parrello 1.1 }
558 :     # Finally, we need to connect this block to the genomes in which it occurs.
559 :     for my $genomeID (keys %genomesFound) {
560 : parrello 1.7 print INSTANCESOUT join("\t", $genomeID, "$groupName:$blockID") . "\n";
561 : parrello 1.1 }
562 :     # Count this block's regions.
563 :     $regionsCount += $regionCounter;
564 :     }
565 :     }
566 :     # Close the input files.
567 :     close BLOCKSIN;
568 :     close REGIONSIN;
569 :     }
570 :     # Close the output files.
571 :     close REGIONSOUT;
572 :     close BLOCKSOUT;
573 :     close INSTANCESOUT; close CONTAINSOUT; close INCLUDESOUT;
574 :     # All the block data has been written. Tell the user what we found.
575 : parrello 1.3 Trace("$blocksCount blocks processed, $regionsCount regions processed.") if T(2);
576 : parrello 1.1 # The next task is to write the genome/contig data. This is provided by the
577 :     # "%contigs" hash. First, we need to open the files.
578 :     my $contigsCount = 0;
579 : parrello 1.3 Open(\*CONTIGSOUT, ">>$outDirectory/Contig.dtx");
580 :     Open(\*CONSISTSOUT, ">>$outDirectory/ConsistsOf.dtx");
581 : parrello 1.1 for my $contigID (keys %contigs) {
582 :     print CONTIGSOUT "$contigID\n";
583 :     print CONSISTSOUT join("\t", $contigs{$contigID}, $contigID) . "\n";
584 :     $contigsCount++;
585 :     }
586 : parrello 1.3 Trace("$contigsCount contigs found.") if T(2);
587 : parrello 1.4 # Close the output files.
588 :     close CONTIGSOUT;
589 :     close CONSISTSOUT;
590 : parrello 1.1 # Now warn the user about all the genomes that didn't have blocks.
591 :     for my $genomeID (keys %genomes) {
592 :     if (! $genomes{$genomeID}) {
593 : parrello 1.3 Trace("Genome $genomeID did not have any regions.") if T(1);
594 : parrello 1.1 $errorCount++;
595 :     }
596 :     }
597 : parrello 1.3 return $errorCount;
598 : parrello 1.1 }
599 :     # Tell the user we're done.
600 : parrello 1.3 Trace("Processing complete.") if T(0);
601 : parrello 1.1
602 :     # Read a region record from the file and parse it into a hash
603 :     # for return to the caller. If we reach end-of-file, the
604 :     # hash returned will have $TRAILER in the blockID field.
605 :     sub GetRegionRecord {
606 :     # Create the return hash.
607 :     my %retVal = ();
608 :     # Read the record.
609 :     my $regionData = <REGIONSIN>;
610 :     # Check for end-of-file.
611 :     if (!defined $regionData) {
612 :     # Here we have end-of-file, so stuff in a trailer
613 :     # value for the block ID.
614 :     $retVal{blockID} = $TRAILER;
615 :     } else {
616 :     # Here we have a real record.
617 :     ($retVal{peg}, $retVal{genomeID}, $retVal{contigID},
618 :     $retVal{begin}, $retVal{end}, $retVal{blockID},
619 :     $retVal{snippet}) = Tracer::ParseRecord($regionData);
620 :     }
621 :     # Return the hash created.
622 :     return %retVal;
623 :     }
624 :    
625 :     1;
