Changeset 31587

- Timestamp: May 29, 2011, 11:26:31 AM
- Location: branches/eam_branches/ipp-20110505
- Files: 30 edited
- ippScripts/scripts/camera_exp.pl (modified) (1 diff)
- ippScripts/scripts/dist_advancerun.pl (modified) (1 diff)
- ippScripts/scripts/dist_bundle.pl (modified) (6 diffs)
- ippScripts/scripts/ipp_apply_burntool_single.pl (modified) (1 prop)
- ippScripts/scripts/lap_science.pl (modified) (21 diffs)
- ippScripts/scripts/receive_file.pl (modified) (2 diffs)
- ippScripts/scripts/warp_skycell.pl (modified) (4 diffs)
- ippTasks/lap.pro (modified) (21 diffs)
- ippToPsps/jython/batch.py (modified) (17 diffs)
- ippToPsps/jython/detectionbatch.py (modified) (22 diffs)
- ippToPsps/jython/gpc1db.py (modified) (12 diffs)
- ippToPsps/jython/ipptopspsdb.py (modified) (8 diffs)
- ippToPsps/jython/mysql.py (modified) (14 diffs)
- ippToPsps/jython/scratchdb.py (modified) (11 diffs)
- ippToPsps/jython/stackbatch.py (modified) (37 diffs)
- ippTools/share/laptool_definerun.sql (modified) (2 diffs)
- ippTools/share/laptool_exposures.sql (modified) (1 diff)
- ippTools/src/disttool.c (modified) (1 diff)
- ippTools/src/laptool.c (modified) (2 diffs)
- ippTools/src/laptoolConfig.c (modified) (2 diffs)
- ippTools/src/pstamptool.c (modified) (2 diffs)
- ippTools/src/pubtoolConfig.c (modified) (1 diff)
- ippTools/src/stacktool.c (modified) (2 diffs)
- ippTools/src/stacktoolConfig.c (modified) (1 diff)
- ppStack/src/ppStackPrepare.c (modified) (5 diffs)
- psModules/src/imcombine/pmSubtractionStamps.c (modified) (4 diffs)
- psModules/src/objects/models (modified) (1 prop)
- psconfig/psbuild (modified) (1 diff)
- pstamp/scripts/pstamp_job_run.pl (modified) (1 diff)
- pstamp/scripts/pstampparse.pl (modified) (1 diff)
branches/eam_branches/ipp-20110505/ippScripts/scripts/camera_exp.pl
r30071 → r31587

 }
 chomp $cmdflags;
+
+{ # Determine if FWHM is too large to bother continuing.
+    my $command = "$ppConfigDump -camera $camera -dump-camera -";
+    my ($success, $error_code, $full_buf, $stdout_buf, $stderr_buf ) =
+        run(command => $command, verbose => $verbose);
+    unless ($success) {
+        $error_code = (($error_code >> 8) or $PS_EXIT_PROG_ERROR);
+        &my_die("Unable to perform ppConfigDump: $error_code", $cam_id, $PS_EXIT_CONFIG_ERROR);
+    }
+    my $cameraConfig = $mdcParser->parse(join "", @$stdout_buf) or
+        &my_die("Unable to parse metadata config doc", $cam_id, $PS_EXIT_CONFIG_ERROR);
+
+    my $maxFWHM = metadataLookupStr($cameraConfig, 'MAX_ALLOWED_FWHM');
+    if ($maxFWHM) {
+        my $expFWHM;
+        ($expFWHM) = $cmdflags =~ /-fwhm_major (\d+)/;
+
+        if ($expFWHM > $maxFWHM) {
+            print "Setting quality to 4007 due to large FWHM: exposure: $expFWHM maximum: $maxFWHM\n";
+            $cmdflags .= " -quality 4007 "; # This corresponds to PSASTRO_ERR_DATA
+            $no_op = 1;
+        }
+    }
+}
 }
branches/eam_branches/ipp-20110505/ippScripts/scripts/dist_advancerun.pl
r30489 → r31587

     $tool_cmd = "$staticskytool -sky_id";
     $list_mode = "-result";
-    $component_key = " ";
+    $component_key = "skycell_id";
 } elsif ($stage eq "diff") {
     $tool_cmd = "$difftool -diff_id";
branches/eam_branches/ipp-20110505/ippScripts/scripts/dist_bundle.pl
r30922 → r31587

 my $streaksrelease = can_run('streaksrelease') or (warn "Can't find streaksrelease" and $missing_tools = 1);
 my $bgtool = can_run('bgtool') or (warn "Can't find bgtool" and $missing_tools = 1);
+my $staticskytool = can_run('staticskytool') or (warn "Can't find staticskytool" and $missing_tools = 1);
 my $file_cmd = can_run('file') or (warn "can't find program file" and $missing_tools = 1);
 my $zcat = can_run('zcat') or (warn "can't find program zcat" and $missing_tools = 1);

…

     defined $stage_id and
     defined $component and
-    defined $exp_type and
     defined $path_base and
     defined $outroot;

+if ($stage eq 'raw' and !$clean and !$no_magic) {
+
+    # for raw stage need to have exposure type defined and if the type is OBJECT we need
+    # a chip_path_base so we can find the chip mask file
+    if (!defined $exp_type or ($exp_type eq 'OBJECT' and !defined $chip_path_base)) {
+        pod2usage( -msg => "Required options: --chip_path_base --exp_type for raw stage", -exitval => 3);
+    }
+}
+
 $ipprc->redirect_output($logfile) if $logfile;

-if ($stage eq 'raw' and !$clean and !$no_magic) {
-    # need to be able to find chip mask file
-    if ($exp_type eq 'OBJECT' and !defined $chip_path_base) {
-        pod2usage( -msg => "Required options: --chip_path_base for raw stage", -exitval => 3);
-    }
-}
-
 $ipprc->define_camera($camera);

 $ipprc->outroot_prepare($outroot);

+my $num_sky_inputs;
+if ($stage eq 'sky') {
+    $num_sky_inputs = get_num_sky_inputs($stage_id);
+}
 # Get the list of data products for this component
 # note: We my_die in get_file_list if something goes wrong.

-my $file_list = get_file_list($stage, $component, $path_base, $clean );
+my $file_list = get_file_list($stage, $component, $path_base, $clean, $num_sky_inputs);

 if (($stage ne 'raw') and ($stage ne 'fake') and !$poor_quality) {

…

 # it must exist)
 next if $poor_quality;
+
+# skip file stats file. Due to a bug the update process destroys them sometimes
+# XXX: perhaps only do this for stages where we know that this happens
+next if $file_rule =~ /STATS/;

 &my_die("failed to resolve $file_name", $component, $PS_EXIT_DATA_ERROR);

…

 my $path_base = shift;
 my $clean = shift;
+my $num_sky_inputs = shift;

 my @file_list;

…

     $config_file_rule = "PPSTACK.CONFIG";
 } elsif ($stage eq "sky") {
-    $config_file_rule = "PSPHOT.STACK.CONFIG";
+    if ($num_sky_inputs > 1) {
+        $config_file_rule = "PSPHOT.STACK.CONFIG";
+    } else {
+        $config_file_rule = "PSPHOT.SKY.CONFIG";
+    }
 } else {
     &my_die("$stage is not a valid stage", $component, $PS_EXIT_CONFIG_ERROR);

…

 return \@file_list;
 }
+sub get_num_sky_inputs {
+    my $sky_id = shift;
+
+    my $command = "$staticskytool -inputs -sky_id $sky_id -simple";
+    $command .= " -dbname $dbname" if $dbname;
+    $command .= " | wc";
+    my ( $success, $error_code, $full_buf, $stdout_buf, $stderr_buf ) =
+        run(command => $command, verbose => $verbose);
+    unless ($success) {
+        $error_code = (($error_code >> 8) or $PS_EXIT_PROG_ERROR);
+        &my_die("Unable to perform $command: $error_code", $component, $error_code);
+    }
+    my ($num_inputs, $words, $chars) = split " ", (join "", @$stdout_buf);
+    if (!$num_inputs) {
+        $num_inputs = "undefined" if !defined $num_inputs;
+        &my_die("unexpected number of static sky inputs $num_inputs", $PS_EXIT_PROG_ERROR, $component, $error_code);
+    }
+
+    return $num_inputs;
+}

 sub my_die
branches/eam_branches/ipp-20110505/ippScripts/scripts/ipp_apply_burntool_single.pl
- Property svn:mergeinfo changed (with no actual effect on merging)
-
branches/eam_branches/ipp-20110505/ippScripts/scripts/lap_science.pl
r31454 r31587 99 99 my $status = queue_chips($lap_id); 100 100 101 if ( $status) {101 if (!$status) { # This is the culprit. 102 102 my $command = "$laptool -updaterun -lap_id $lap_id"; 103 103 $command .= " -dbname $dbname " if defined $dbname; … … 120 120 # This is a puzzler... chiptool doesn't actually return a useful metadata. We'll just scrape it from the database for now. 121 121 122 my $command = "$chiptool -listrun - exp_id $exp_id -label $label -data_group $data_group";122 my $command = "$chiptool -listrun -pstamp_order -exp_id $exp_id -label $label -data_group $data_group"; 123 123 $command .= " -dbname $dbname " if defined $dbname; 124 124 … … 236 236 my $comment = $exposure->{comment}; 237 237 238 # This is a hack to fix old exposures that have no object. 239 unless(defined($comment)) { 240 $comment = ''; 241 } 242 if ((!defined($object))||($object eq 'NULL')||($object eq '')) { 243 if ($comment =~ /3pi_/) { 244 $object = $comment; 245 $object =~ s/^.*?(3pi_\d\d_\d\d\d\d).*?$/$1/; 246 } 247 elsif ($comment =~ / ps1_/) { 248 $object = $comment; 249 $object =~ s/^.*?(ps1_\d\d_\d\d\d\d).*$/$1/; 250 } 251 else { 252 $exposure->{data_state} = 'new'; 253 $exposure->{pairwise} = 0; 254 $exposure->{private} = 1; 255 $exposure->{pair_id} = 9223372036854775807; 256 update_this_exposure($exposure); 257 $counter++; 258 next; 259 } 260 } 261 262 238 263 if (S64_IS_NOT_NULL($chip_id)) { # We already have a defined chip_id 239 264 if (($pairwise) && !($pair_id)) { … … 255 280 $counter++; 256 281 } 282 print "ZZ: $exp_id $object $comment $matching{$object}{$comment}\n"; 257 283 } 258 284 … … 262 288 foreach my $comment (keys %{ $matching{$object} }) { 263 289 push @exp_ids_to_diff, $matching{$object}{$comment}; 290 print "$object $comment $matching{$object}{$comment} $indexing{$matching{$object}{$comment}} $exp_ids_to_diff[-1]\n"; 264 291 } 265 292 @exp_ids_to_diff = sort { $indexing{$a} <=> $indexing{$b} } @exp_ids_to_diff; … … 275 302 my $exp_A = ${ $exposures }[$indexing{$exp_id_A}]; 276 303 my $exp_B = ${ $exposures }[$indexing{$exp_id_B}]; 277 304 print "$exp_A $exp_B $exp_id_A $exp_id_B $indexing{$exp_id_A} $indexing{$exp_id_B}\n"; 278 305 $exp_A->{pairwise} = 1; 279 306 $exp_A->{private} = 0; … … 292 319 # Scan all exposures, and ensure that pairwise and private are set correctly 293 320 foreach my $exposure (@$exposures) { 321 print "YY: $exposure\n"; 294 322 if ($exposure->{pairwise} && !($exposure->{pair_id})) { 295 323 $exposure->{pairwise} = 0; # We marked it for pairwise diffs, but didn't match it. Probably an error. … … 299 327 } 300 328 301 $exposure =update_this_exposure($exposure);302 } 303 return( 1);329 update_this_exposure($exposure); 330 } 331 return(0); 304 332 } 305 333 … … 432 460 my $companion; 433 461 434 if ($exposure->{pair_id}) { # Load companion exposure information 435 if (exists($match_hash{$exposure->{chip_id}})) { 462 if ($exposure->{pairwise}) { 463 # Load companion exposure information 464 if (($exposure->{pair_id})&&(exists($match_hash{$exposure->{chip_id}}))) { 436 465 $companion = ${ $exposures }[$match_hash{$exposure->{chip_id}}]; # Match! 466 } 467 else { # We claimed to be pairwise, but do not have a valid pair_id. 
468 $exposure->{pairwise} = 0; 469 $exposure->{private} = 1; 470 &update_this_exposure($exposure); 437 471 } 438 472 } … … 446 480 $exposure = remake_this_exposure($exposure); 447 481 } 448 if ($exposure->{cam_quality}) { 449 $needs_qstack = 1; 450 $needs_something_private = 1; 451 if ($companion) { 452 $companion->{private} = 1; 453 $companion->{pairwise} = 0; 454 &update_this_exposure($companion); 482 # Do quality checks here 483 my $is_bad_quality = 0; 484 if ((defined($exposure->{chipRun_state}))&&($exposure->{chipRun_state} eq 'full')&& 485 ($exposure->{chip_component_count} > 0)&&($exposure->{chip_bad_quality} / $exposure->{chip_component_count} > 0.05)) { 486 printf("QUALITY: $exposure->{exp_id} has bad chip quality: %d / %d\n", 487 $exposure->{chip_bad_quality} , $exposure->{chip_component_count}); 488 $is_bad_quality = 1; 489 } 490 elsif ((defined($exposure->{camRun_state}))&&($exposure->{camRun_state} eq 'full')&& 491 ($exposure->{cam_bad_quality} / $exposure->{cam_component_count} > 0)) { 492 printf("QUALITY: $exposure->{exp_id} has bad cam quality: %d / %d\n", 493 $exposure->{cam_bad_quality} , $exposure->{cam_component_count}); 494 $is_bad_quality = 1; 495 } 496 elsif ((defined($exposure->{warpRun_state}))&&($exposure->{warpRun_state} eq 'full')&& 497 ($exposure->{warp_bad_quality} / $exposure->{warp_component_count} > 0.2)) { 498 printf("QUALITY: $exposure->{exp_id} has bad warp quality: %d / %d\n", 499 $exposure->{warp_bad_quality} , $exposure->{warp_component_count}); 500 $is_bad_quality = 1; 501 } 502 elsif ((defined($exposure->{diffRun_state}))&&($exposure->{diffRun_state} eq 'full')&& 503 ($exposure->{diff_bad_quality} / $exposure->{diff_component_count} > 0.5)) { 504 printf("QUALITY: $exposure->{exp_id} has bad diff quality: %d / %d\n", 505 $exposure->{diff_bad_quality} , $exposure->{diff_component_count}); 506 $is_bad_quality = 1; 507 } 508 # If we've detected a bad quality exposure, drop it, and tell the companion. 509 if ($is_bad_quality) { 510 unless ((defined($exposure->{diffRun_state}))&& 511 ($exposure->{diffRun_state} eq 'full')) { 512 $needs_qstack = 1; 513 $needs_something_private = 1; 514 if ($companion) { 515 $companion->{private} = 1; 516 $companion->{pairwise} = 0; 517 &update_this_exposure($companion); 518 } 519 $exposure->{private} = 1; 520 $exposure->{pairwise} = 0; 455 521 } 456 $exposure->{private} = 1;457 $exposure->{pairwise} = 0;458 522 $exposure->{data_state} = 'drop'; 459 523 &update_this_exposure($exposure); 460 524 461 525 } 462 # if ($companion) { # Validate that there are no problems with the companion exposure 463 # if ($companion->{cam_quality}) { # Maybe other things here? 464 # $exposure->{private} = 1; 465 # $exposure->{data_state} = 'drop'; 466 # &update_this_exposure($exposure); 467 # $needs_qstack = 1; 468 # } 469 # } 526 470 527 if ($exposure->{data_state} eq 'drop') { # This exposure is impossible, so fudge the counts so we get through. 
471 528 $can_qstack ++; … … 564 621 } 565 622 623 my $warps = ''; 624 foreach $exposure (@$exposures) { 625 if (($exposure->{data_state} != 'drop')&& 626 (S64_IS_NOT_NULL($exposure->{warp_id}))) { 627 $warps .= " -warp_id $exposure->{warp_id} "; 628 } 629 } 630 566 631 my @utctime = gmtime(); 567 632 $utctime[5] += 1900; … … 570 635 my $workdir_date = sprintf("%4d/%02d/%02d",$utctime[5],$utctime[4],$utctime[3]); 571 636 my $workdir = "neb://\@HOST\@.0/${dbname}/${label}/${workdir_date}"; 572 my $data_group = "${label}.${ date}";637 my $data_group = "${label}.${proj_cell}.quick.${date}"; 573 638 574 639 my $command = "$stacktool "; … … 576 641 $command .= " -dbname $dbname " if defined $dbname; 577 642 $command .= " -definebyquery -select_label $label -select_skycell_id ${proj_cell}.% -select_filter $filter "; 578 $command .= " -set_label ${label} -set_data_group $ {proj_cell}.quick.${date}";643 $command .= " -set_label ${label} -set_data_group $data_group "; 579 644 $command .= " -set_workdir $workdir -set_dist_group NODIST "; 580 645 $command .= " -min_num 2 -set_reduction QUICKSTACK "; 646 $command .= " $warps "; 581 647 582 648 my ($success, $error_code, $full_buf, $stdout_buf, $stderr_buf ) = … … 589 655 $command = "$stacktool "; 590 656 $command .= " -dbname $dbname " if defined $dbname; 591 $command .= " -sassskyfile -data_group $ {proj_cell}.quick.${date}";657 $command .= " -sassskyfile -data_group $data_group "; 592 658 $command .= " -filter $filter -projection_cell ${proj_cell} "; 593 659 … … 632 698 unless (defined($label) && defined($filter) && defined($proj_cell)) { 633 699 &my_die("Unable to perform stacktool. Insufficient information.", $lap_id); 700 } 701 702 my $warps = ''; 703 foreach $exposure (@$exposures) { 704 if (($exposure->{data_state} != 'drop')&& 705 (S64_IS_NOT_NULL($exposure->{magicked}))&& 706 (S64_IS_NOT_NULL($exposure->{warp_id}))) { 707 $warps .= " -warp_id $exposure->{warp_id} "; 708 } 634 709 } 635 710 … … 640 715 my $workdir_date = sprintf("%4d/%02d/%02d",$utctime[5],$utctime[4],$utctime[3]); 641 716 my $workdir = "neb://\@HOST\@.0/${dbname}/${label}/${workdir_date}"; 642 my $data_group = "${label}.${ date}";717 my $data_group = "${label}.${proj_cell}.final.${date}"; 643 718 644 719 my $command = "$stacktool "; … … 646 721 $command .= " -dbname $dbname " if defined $dbname; 647 722 $command .= " -definebyquery -select_label $label -select_skycell_id ${proj_cell}.% -select_filter $filter "; 648 $command .= " -set_label ${label} -set_workdir $workdir -set_data_group ${proj_cell}.final.${date} "; 649 $command .= " -min_num 2 -set_reduction THREEPI_STACK "; 723 $command .= " -set_label ${label} -set_workdir $workdir -set_data_group $data_group "; 724 $command .= " -min_num 2 -set_reduction THREEPI_STACK -set_dist_group ${label} "; 725 $command .= " $warps "; 650 726 651 727 my ($success, $error_code, $full_buf, $stdout_buf, $stderr_buf ) = … … 659 735 $command = "$stacktool "; 660 736 $command .= " -dbname $dbname " if defined $dbname; 661 $command .= " -sassskyfile -data_group $ {proj_cell}.final.${date}";737 $command .= " -sassskyfile -data_group $data_group "; 662 738 $command .= " -filter $filter -projection_cell ${proj_cell} "; 663 739 … … 734 810 $command .= " -set_dist_group $exposure->{dist_group} "; 735 811 } 736 737 if ( $exposure->{pairwise}) { # warpwarp812 my $retry_command; 813 if (($exposure->{pairwise})&&(defined(${ $exposures }[$match_hash{$exposure->{chip_id}}]))) { # warpwarp 738 814 my $companion = ${ $exposures 
}[$match_hash{$exposure->{chip_id}}]; 739 815 $command .= " -definewarpwarp "; 740 $command .= "-input_label $label -template_label $label -backwards";816 $command .= "-input_label $label -template_label $label "; 741 817 $command .= "-warp_id $exposure->{warp_id} -template_warp_id $companion->{warp_id} "; 818 $retry_command = $command; 819 $command .= " -backwards "; # This usually works. 742 820 $already_queued{$exposure->{warp_id}} = 1; 743 821 $already_queued{$companion->{warp_id}} = 1; … … 763 841 my $diff_id = $diff->{diff_id}; 764 842 unless (defined($diff_id)) { 765 $exposure->{data_state} = 'drop'; 766 &update_this_exposure($exposure); 767 } 768 843 if ($retry_command) { 844 ($success, $error_code, $full_buf, $stdout_buf, $stderr_buf ) = 845 run(command => $retry_command, verbose => $verbose); 846 unless ($success) { 847 $error_code = (($error_code >> 8) or $PS_EXIT_PROG_ERROR); 848 &my_die("unable to perform difftool -definewarp(warp|stack): $error_code", $exposure->{lap_id}, $exposure->{proj_cell}); 849 } 850 851 $diffs = $mdcParser->parse_list(join "", @$stdout_buf) or 852 &my_die("Unable to parse metadata from difftool -definewarp(warp|stack)", $lap_id, ""); 853 854 $diff = ${ $diffs }[0]; 855 $diff_id = $diff->{diff_id}; 856 } 857 unless (defined($diff_id)) { 858 $exposure->{data_state} = 'drop'; 859 &update_this_exposure($exposure); 860 } 861 } 769 862 } 770 863 } … … 872 965 873 966 my $command = "$laptool -updateexp -lap_id $lap_id -exp_id $exp_id "; 967 $command .= " -dbname $dbname " if defined $dbname; 874 968 if (($exposure->{chip_id})&&(S64_IS_NOT_NULL($exposure->{chip_id}))) { 875 969 $command .= " -set_chip_id $exposure->{chip_id} "; -
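For reference, the quality gate added to lap_science.pl above drops an exposure once a completed ('full') stage has too large a fraction of bad-quality components: more than 5% at chip, any at cam, more than 20% at warp, and more than 50% at diff. The following is a minimal Python sketch of just that decision, with hypothetical field names mirroring the Perl hash keys; it is illustrative only and not part of the IPP code base.

# Sketch of the per-stage quality thresholds described above; names are hypothetical.
THRESHOLDS = {"chip": 0.05, "cam": 0.0, "warp": 0.2, "diff": 0.5}

def is_bad_quality(exposure):
    """Return True when any finished stage exceeds its bad-component threshold."""
    for stage, limit in THRESHOLDS.items():
        if exposure.get(stage + "Run_state") != "full":
            continue                              # only judge completed stages
        total = exposure.get(stage + "_component_count", 0)
        bad = exposure.get(stage + "_bad_quality", 0)
        if total > 0 and bad / float(total) > limit:
            return True
    return False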
branches/eam_branches/ipp-20110505/ippScripts/scripts/receive_file.pl
r30674 → r31587

 $stage = 'sky';
 # XXX: This should be skycell, but the distribution code uses exposure
-$comp_name = 'exposure';
-$current_component = $comp_name;
+$comp_name = 'skycell_id';
+# $current_component = $comp_name;
 } else {
     &my_die( "unexpected run type line found in $filename: $runType\n", $file_id, $PS_EXIT_UNKNOWN_ERROR);

…

 $new_workdir_value = "$workdir/$destdir";
 }
+
+if ($stage eq 'sky') {
+    # the dbinfo file for a skyRun only has one component and it doesn't contain
+    # skycell_id which is the way components are listed in the dirinfo file.
+    my @ids = keys %$components;
+    &my_die( "unexpected number of components scalar @ids found in staticsky dirinfo file\n", $file_id, $PS_EXIT_UNKNOWN_ERROR) if scalar @ids != 1;
+    $current_component = $ids[0];
+}
+
 my $component_dir;
 if ($current_component) {
branches/eam_branches/ipp-20110505/ippScripts/scripts/warp_skycell.pl
r30825 → r31587

 
 my ($logDest, $traceDest);
+my $do_stats;
 if ($run_state eq 'new') {
     $logDest = prepare_output("LOG.EXP", $outroot, $skycell_id, 0);
     $traceDest = prepare_output("TRACE.EXP", $outroot, $skycell_id, 1);
+    $do_stats = 1;
 } elsif ($run_state eq 'update') {
     $logDest = prepare_output("LOG.EXP.UPDATE", $outroot, $skycell_id, 1);

…

 my $outputBin1 = prepare_output ("PSWARP.BIN1", $outroot, $skycell_id, 1);
 my $outputBin2 = prepare_output ("PSWARP.BIN2", $outroot, $skycell_id, 1);
-my $outputStats = prepare_output ("SKYCELL.STATS", $outroot, $skycell_id, 1);
+my $outputStats;
+if ($do_stats) {
+    $outputStats = prepare_output ("SKYCELL.STATS", $outroot, $skycell_id, 1) if $do_stats;
+}
 my $configuration;

…

 # Run pswarp
 my $cmdflags;
-my $do_stats;
 unless ($no_op) {
     my $command = "$pswarp";

…

 if ($run_state eq 'new') {
     $command .= " -dumpconfig $configuration";
-    $do_stats = 1;
 } else {
     $command .= " -ipprc $configuration";
branches/eam_branches/ipp-20110505/ippTasks/lap.pro
r31454 r31587 17 17 18 18 macro lap.show.books 19 echo "lapNewRuns" 19 20 book listbook lapNewRuns 21 echo "lapRunRuns" 20 22 book listbook lapRunRuns 23 echo "lapDoneRuns" 21 24 book listbook lapDoneRuns 25 echo "lapFullRuns" 22 26 book listbook lapFullRuns 27 end 28 29 macro lap.clear.books 30 book init lapNewRuns 31 book init lapRunRuns 32 book init lapFullRuns 33 book init lapDoneRuns 23 34 end 24 35 … … 36 47 active true 37 48 end 38 task lap.cleanup.load39 active true40 end41 task lap.cleanup.run42 active true43 end44 49 end 45 50 … … 57 62 active false 58 63 end 59 task lap.cleanup.load 60 active false 61 end 62 task lap.cleanup.run 64 end 65 66 macro lap.debug.mode 67 task lap.initial.load 68 active true 69 end 70 task lap.initial.run 71 active false 72 end 73 task lap.monitor.load 74 active true 75 end 76 task lap.monitor.run 63 77 active false 64 78 end … … 156 170 # success 157 171 task.exit 0 158 # book delpage lapNewRuns $options:0159 172 ipptool2book stdout lapNewRuns -uniq -key lap_id -setword dbname $options:0 -setword pantaskState INIT 160 173 174 process_cleanup lapNewRuns 175 161 176 if ($VERBOSE > 2) 162 177 book listbook lapNewRuns … … 181 196 periods -exec $LOADEXEC 182 197 periods -timeout 600 198 active false 183 199 # This can probably be increased and spread over hosts in the future. 184 200 npending 1 … … 193 209 194 210 195 book getpage lapNewRuns $lap_NewPage -var lapNewPageName211 book getpage lapNewRuns 0 -var lapNewPageName -key pantaskState INIT 196 212 197 213 $lap_NewPage ++ 198 214 if ($lap_NewPage >= $N) set lap_NewPage = 0 199 215 200 201 216 if ("$lapNewPageName" == "NULL") break 217 218 202 219 203 220 book setword lapNewRuns $lapNewPageName pantaskState RUN … … 205 222 book getword lapNewRuns $lapNewPageName dbname -var DBNAME 206 223 224 option $LAP_ID 225 207 226 $run = lap_science.pl --chip_mode --dbname $DBNAME --lap_id $LAP_ID 208 209 command $run 210 211 end 212 213 # success 214 task.exit 0 215 # book delpage lapNewRuns $options:0 216 # ipptool2book stdout lapNewRuns -uniq -key lap_id 217 227 228 command $run 229 230 end 231 232 # success 233 task.exit 0 234 process_exit lapNewRuns $options:0 0 218 235 if ($VERBOSE > 2) 219 236 book listbook lapNewRuns … … 222 239 # locked list 223 240 task.exit default 224 showcommand failure 225 end 226 task.exit crash 227 showcommand crash 228 end 229 #operation times out? 230 task.exit timeout 241 process_exit lapNewRuns $options:0 0 242 showcommand failure 243 end 244 task.exit crash 245 process_exit lapNewRuns $options:0 0 246 showcommand crash 247 end 248 #operation times out? 249 task.exit timeout 250 process_exit lapNewRuns $options:0 0 231 251 showcommand timeout 232 252 end … … 267 287 268 288 add_poll_labels run 269 # echo $run 270 command $run 271 end 272 # success 273 task.exit 0 274 # book delpage lapRunRuns $options:0 289 command $run 290 end 291 # success 292 task.exit 0 275 293 ipptool2book stdout lapRunRuns -uniq -key lap_id -setword dbname $options:0 -setword pantaskState INIT 294 295 process_cleanup lapRunRuns 276 296 277 297 if ($VERBOSE > 2) … … 297 317 periods -exec $LOADEXEC 298 318 periods -timeout 600 319 active false 299 320 # This can probably be increased and spread over hosts in the future. 
300 321 npending 1 … … 306 327 book npages lapRunRuns -var N 307 328 308 # echo $N $NETWORK309 329 if ($N == 0) break 310 330 if ($NETWORK == 0) break 311 331 312 332 313 book getpage lapRunRuns $lap_RunPage -var lapRunPageName333 book getpage lapRunRuns 0 -var lapRunPageName -key pantaskState INIT 314 334 315 335 $lap_RunPage ++ 316 336 if ($lap_RunPage >= $N) set lap_RunPage = 0 317 337 318 # echo $lapRunPageName319 338 if ("$lapRunPageName" == "NULL") break 320 339 … … 323 342 book getword lapRunRuns $lapRunPageName dbname -var DBNAME 324 343 344 option $LAP_ID 345 325 346 $run = lap_science.pl --monitor_mode --dbname $DBNAME --lap_id $LAP_ID 326 347 … … 331 352 # success 332 353 task.exit 0 333 # book delpage lapRunRuns $options:0 334 # ipptool2book stdout lapRunRuns -uniq -key lap_id 335 336 if ($VERBOSE > 2) 354 process_exit lapRunRuns $options:0 0 355 if ($VERBOSE > 2) 356 337 357 book listbook lapRunRuns 338 358 end … … 340 360 # locked list 341 361 task.exit default 342 showcommand failure 343 end 344 task.exit crash 345 showcommand crash 346 end 347 #operation times out? 348 task.exit timeout 362 process_exit lapRunRuns $options:0 0 363 showcommand failure 364 end 365 task.exit crash 366 process_exit lapRunRuns $options:0 0 367 showcommand crash 368 end 369 #operation times out? 370 task.exit timeout 371 process_exit lapRunRuns $options:0 0 349 372 showcommand timeout 350 373 end … … 358 381 periods -exec $LOADEXEC 359 382 periods -timeout 30 383 active false 360 384 npending 1 361 385 … … 390 414 ipptool2book stdout lapDoneRuns -uniq -key lap_id -setword dbname $options:0 -setword pantaskState INIT 391 415 416 process_cleanup lapDoneRuns 392 417 if ($VERBOSE > 2) 393 418 book listbook lapRuns … … 412 437 periods -exec $LOADEXEC 413 438 periods -timeout 600 439 active false 414 440 # This can probably be increased and spread over hosts in the future. 415 441 npending 1 … … 424 450 425 451 426 book getpage lapDoneRuns $lap_DonePage -var lapDonePageName452 book getpage lapDoneRuns 0 -var lapDonePageName -key pantaskState INIT 427 453 428 454 $lap_DonePage ++ … … 435 461 book getword lapDoneRuns $lapDonePageName dbname -var DBNAME 436 462 463 option $LAP_ID 437 464 $run = lap_science.pl --cleanup_mode --dbname $DBNAME --lap_id $LAP_ID 438 465 … … 443 470 # success 444 471 task.exit 0 445 # ipptool2book stdout lapDoneRuns -uniq -key lap_id 446 472 process_exit lapDoneRuns $options:0 0 447 473 if ($VERBOSE > 2) 448 474 book listbook lapDoneRuns … … 451 477 # locked list 452 478 task.exit default 453 showcommand failure 454 end 455 task.exit crash 456 showcommand crash 457 end 458 #operation times out? 459 task.exit timeout 460 showcommand timeout 461 end 462 end 479 process_exit lapDoneRuns $options:0 0 480 showcommand failure 481 end 482 task.exit crash 483 process_exit lapDoneRuns $options:0 0 484 showcommand crash 485 end 486 #operation times out? 487 task.exit timeout 488 process_exit lapDoneRuns $options:0 0 489 showcommand timeout 490 end 491 end -
branches/eam_branches/ipp-20110505/ippToPsps/jython/batch.py
r31405 r31587 32 32 "../config/2/tables.vot" 33 33 ''' 34 def __init__(self, logger, batchType, inputFitsPath="", survey="", useFullTables=False): 34 def __init__(self, 35 logger, 36 gpc1Db, 37 ippToPspsDb, 38 scratchDb, 39 id, 40 batchType, 41 inputFitsPath="", 42 survey="", 43 useFullTables=False): 44 45 self.everythingOK = False 46 self.readHeader = False 35 47 36 48 # set up logging … … 40 52 41 53 # set up class variables 54 self.id = id 55 self.gpc1Db = gpc1Db 56 self.ippToPspsDb = ippToPspsDb 57 self.scratchDb = scratchDb 42 58 self.batchType = batchType; 43 59 self.pspsVoTableFilePath = "../config/" + batchType + "/tables.vot" … … 46 62 self.useFullTables = useFullTables 47 63 64 if self.alreadyProcessed(): return 65 66 # do we have an input file? 67 if self.inputFitsPath != "": 68 69 if not self.readPrimaryHeader(): return 70 48 71 # TODO 49 72 self.tablesToExport = [] … … 51 74 # open config 52 75 doc = ElementTree(file="config.xml") 53 54 # create Gpc1Db object55 self.gpc1Db = Gpc1Db(self.logger)56 self.ippToPspsDb = IppToPspsDb(logger)57 self.scratchDb = ScratchDb(logger, self.useFullTables)58 76 59 77 if self.survey != "": … … 88 106 self.dateStr = now.strftime("%Y-%m-%d") 89 107 90 if self.inputFitsPath != "":91 file = open(self.inputFitsPath)92 self.header = self.parseFitsHeader(file)93 self.logger.info("Read primary and found " + str(len(self.header)) + " header cards")94 # TODO close file?95 96 108 # create DVO tables if accessing DVO directly 97 109 if not self.useFullTables: self.scratchDb.createDvoTables() 98 110 111 self.everythingOK = True 112 99 113 ''' 100 114 Destructor … … 103 117 104 118 self.logger.debug("Batch destructor") 119 120 121 ''' 122 Reads the primary header of the FITS file 123 ''' 124 def readPrimaryHeader(self): 125 126 if self.readHeader: return True 127 128 # does it exist? 129 if not os.path.isfile(self.inputFitsPath): 130 131 self.logger.error("Cannot read file at '" + self.inputFitsPath + "'") 132 return False 133 134 file = open(self.inputFitsPath) 135 self.header = self.parseFitsHeader(file) 136 self.logger.info("Read primary header and found " + str(len(self.header)) + " header cards") 137 # TODO close file? 138 139 self.readHeader = True 140 141 return True 105 142 106 143 … … 138 175 file.seek(index + 2880, 0) 139 176 140 if found != True: self.logger.error("...could not find extension '" + name + "'") 141 else: self.logger.info("...read header at '" + name + "' and found " + str(len(header)) + " header cards") 177 if found != True: 178 self.logger.error("...could not read header in extension '" + name + "'") 179 return 180 #else: self.logger.info("...read header at '" + name + "' and found " + str(len(header)) + " header cards") 142 181 143 182 return header … … 262 301 263 302 first = True 303 304 self.totalDetections = 0 264 305 for table in tables: 265 306 266 sql = "SELECT MIN(objID), MAX(objID) FROM " + table267 rs = self.scratchDb. 
stmt.executeQuery(sql)307 sql = "SELECT MIN(objID), MAX(objID), COUNT(objID) FROM " + table 308 rs = self.scratchDb.executeQuery(sql) 268 309 rs.first() 310 311 self.totalDetections = self.totalDetections + rs.getLong(3) 269 312 270 313 if first: … … 276 319 277 320 first = False 321 rs.close() 278 322 279 323 self.ippToPspsDb.updateMinMaxObjID(self.batchID, self.minObjID, self.maxObjID) 324 self.logger.info("Total detections = %ld min objID = %ld max objID = %ld" % (self.totalDetections, self.minObjID, self.maxObjID)) 325 280 326 281 327 ''' … … 313 359 self.pspsTables = stilts.treads(self.pspsVoTableFilePath) 314 360 for table in self.pspsTables: 315 self.logger. info("Creating PSPS table: " + table.name)361 self.logger.debug("Creating PSPS table: " + table.name) 316 362 table.write(self.scratchDb.url + '#' + table.name) 317 363 self.tablesToExport.append(table.name) … … 337 383 Accepts a regular expression filter so not all tables need to be imported 338 384 ''' 339 def importIppTables(self, filter ):385 def importIppTables(self, filter=""): 340 386 341 387 self.logger.info("Attempting to import tables from input FITS file") … … 347 393 match = re.match(filter, table.name) 348 394 if not match: continue 349 self.logger.info(" Reading IPP table " + table.name + " from FITS file")395 self.logger.info("Reading IPP table " + table.name + " from FITS file") 350 396 table = stilts.tpipe(table, cmd='explodeall') 351 397 352 398 # drop any previous tables before import 353 self.scratchDb.dropTable(table.name)399 #self.scratchDb.dropTable(table.name) 354 400 355 401 # IPP FITS files are littered with infinities, so remove these 356 self.logger. info("Removing Infinity values from all columns")402 self.logger.debug("Removing Infinity values from all columns") 357 403 table = stilts.tpipe(table, cmd='replaceval -Infinity null *') 358 404 table = stilts.tpipe(table, cmd='replaceval Infinity null *') … … 360 406 try: 361 407 table.write(self.scratchDb.url + '#' + table.name) 408 self.scratchDb.killLastConnectionID() 409 count = count + 1 362 410 except: 363 self.logger.exception(" Problem writing table '" + table.name + "' to the database")364 count = count + 1 411 self.logger.exception("Problem writing table '" + table.name + "' to the database") 412 365 413 366 414 self.logger.info("Done. 
Imported %d tables" % count) 367 368 415 self.indexIppTables() 369 416 … … 373 420 def exportPspsTablesToFits(self, regex="(.*)"): 374 421 375 self.logger.info("Replacing NULLs with -999 then exporting all PSPS tables to FITS")422 self.logger.info("Replacing NULLs with -999, changing tables names using regex: " + regex) 376 423 _tables = [] 377 424 378 self.logger.info(" Selecting database tables")425 self.logger.info("Selecting database tables") 379 426 for table in self.tablesToExport: 380 427 … … 383 430 384 431 # get everything from table 385 _table = stilts.tread(self.scratchDb.url + '#SELECT * FROM ' + table) 386 432 try: 433 _table = stilts.tread(self.scratchDb.url + '#SELECT * FROM ' + table) 434 self.scratchDb.killLastConnectionID() 435 except: 436 self.logger.exception("Could not read from DB table: " + table) 437 return False 438 387 439 # replace nulls and empty fields with weird PSPS -999 pseudo-null 388 440 _table = stilts.tpipe(_table, cmd='replaceval "" -999 *') … … 395 447 _tables.append(_table) 396 448 397 self.logger.info(" Writing to FITS file '" + self.outputFitsPath + "'...") 398 stilts.twrites(_tables, self.outputFitsPath, fmt='fits') 399 self.logger.info(" ...done") 400 self.ippToPspsDb.updateProcessed(self.batchID, 1) 449 self.logger.info("Writing to FITS file '" + self.outputFitsPath + "'...") 450 try: 451 stilts.twrites(_tables, self.outputFitsPath, fmt='fits') 452 self.ippToPspsDb.updateProcessed(self.batchID, 1) 453 except: 454 self.logger.exception("Could not write to FITS") 455 return False 456 457 return True 401 458 402 459 ''' … … 447 504 ''' 448 505 def alreadyProcessed(self): 449 self.logger.info("Not implemented") 450 451 452 506 self.logger.info("Not implemented") 507 508 509 ''' 510 Creates and publishes a batch 511 ''' 512 def run(self): 513 514 if not self.everythingOK: return 515 516 self.createEmptyPspsTables() 517 self.importIppTables() 518 if self.populatePspsTables(): 519 if self.exportPspsTablesToFits(): 520 self.writeBatchManifest() 521 self.createTarball() 522 self.publishToDatastore() 523 #self.reportNullsInAllPspsTables(False) 524 #sys.exit() 525 self.logger.info("Finished.") 526 527 -
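The batch.py changes above restructure the class so that the constructor only records whether setup succeeded (the everythingOK flag) and a new run() method drives the create/import/populate/export/publish sequence, bailing out when setup failed. The following is a minimal Python sketch of that guard-flag pattern under assumed names; PipelineJob and its methods are illustrative, not the real Batch API.

import os

class PipelineJob(object):
    """Illustrative only: the constructor records whether setup worked; run() is a no-op otherwise."""

    def __init__(self, input_path):
        self.everything_ok = False
        if not os.path.isfile(input_path):    # e.g. missing input or an already-processed batch
            return                            # leave the guard flag down
        self.input_path = input_path
        self.everything_ok = True             # set only after all setup steps succeed

    def run(self):
        if not self.everything_ok:
            return
        # ... createEmptyPspsTables / importIppTables / populatePspsTables /
        #     exportPspsTablesToFits / writeBatchManifest / createTarball / publishToDatastore ...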
branches/eam_branches/ipp-20110505/ippToPsps/jython/detectionbatch.py
r31406 r31587 6 6 from java.lang import * 7 7 from java.sql import * 8 8 9 from batch import Batch 9 10 from gpc1db import Gpc1Db 11 from ipptopspsdb import IppToPspsDb 12 from scratchdb import ScratchDb 10 13 11 14 import logging.config … … 19 22 Constructor 20 23 ''' 21 def __init__(self, logger, camID, inputFile, test=False, useFullTables=False): 24 def __init__(self, 25 logger, 26 gpc1Db, 27 ippToPspsDb, 28 scratchDb, 29 camID, 30 inputFile, 31 test=False, 32 useFullTables=False): 33 22 34 super(DetectionBatch, self).__init__( 23 35 logger, 36 gpc1Db, 37 ippToPspsDb, 38 scratchDb, 39 camID, 24 40 "detection", 25 41 inputFile, 26 "MD04", 27 useFullTables) # TODO 42 "MD04", # TODO 28 43 #"3PI") # TODO 29 30 self.logger.info("DetectionBatch constructor. Creating batch from: '" + inputFile + "'") 31 32 meta = self.gpc1Db.getCameraStageMeta(camID) 33 34 self.expID = meta[0]; 35 self.expName = meta[1]; 36 self.distGroup = meta[2]; 37 38 self.logger.info("Processing exposure with ID: %d, name: %s and distribution group: %s" % (self.expID, self.expName, self.distGroup)) 44 useFullTables) 45 46 if not self.everythingOK: return 47 48 # meta data to the log 49 self.logger.info("New Detection Batch:") 50 self.logger.info("Cam ID: %d" % self.id) 51 self.logger.info("file: %s" % inputFile) 52 self.logger.info("Exp ID: %d" % self.expID) 53 self.logger.info("Exp name: %s" % self.expName) 54 self.logger.info("Distribution group: %s" % self.distGroup) 39 55 40 56 # create an output filename, which is {expID}.FITS … … 167 183 ," + self.header['PCA2X0Y2'] + " \ 168 184 )" 169 self.scratchDb. stmt.execute(sql)185 self.scratchDb.execute(sql) 170 186 171 187 self.scratchDb.updateAllRows("FrameMeta", "surveyID", str(self.surveyID)) … … 184 200 self.scratchDb.dropTable(tableName) 185 201 sql = "CREATE TABLE " + tableName + " LIKE ImageMeta" 186 try: self.scratchDb. stmt.execute(sql)202 try: self.scratchDb.execute(sql) 187 203 except: pass 188 204 … … 310 326 )" 311 327 312 self.scratchDb. stmt.execute(sql)328 self.scratchDb.execute(sql) 313 329 self.scratchDb.updateFilterID(tableName, self.filter) 314 330 self.scratchDb.updateAllRows(tableName, "calibModNum", str(self.calibModNum)) 315 331 self.scratchDb.updateAllRows(tableName, "dataRelease", str(self.dataRelease)) 316 self.totalNumPhotoRef = self.totalNumPhotoRef + int(header['NASTRO'])332 if 'NASTRO' in header: self.totalNumPhotoRef = self.totalNumPhotoRef + int(header['NASTRO']) 317 333 self.scratchDb.replaceNullsInThisColumn(tableName, "polyOrder", "0") 318 334 … … 327 343 self.scratchDb.dropTable(tableName) 328 344 sql = "CREATE TABLE " + tableName + " LIKE Detection" 329 try: self.scratchDb. stmt.execute(sql)345 try: self.scratchDb.execute(sql) 330 346 except: pass 331 347 … … 375 391 ,EXT_NSIGMA \ 376 392 FROM " + ota + "_psf" 377 378 self.scratchDb.stmt.execute(sql) 393 self.scratchDb.execute(sql) 379 394 380 395 # set obsTime 381 396 sql = "UPDATE " + tableName + " SET obsTime = %f, assocDate = '%s', activeFlag = 0" % (self.obsTime, self.dateStr) 382 self.scratchDb. 
stmt.execute(sql)397 self.scratchDb.execute(sql) 383 398 self.scratchDb.updateAllRows(tableName, "dataRelease", str(self.dataRelease)) 384 399 self.scratchDb.updateAllRows(tableName, "historyModNum", "0") … … 387 402 self.scratchDb.updateFilterID(tableName, self.filter) 388 403 389 # now delete bad flux 404 # now delete bad flux and bad chip positions 390 405 self.scratchDb.reportAndDeleteRowsWithNULLS(tableName, "instFlux") 391 406 self.scratchDb.reportAndDeleteRowsWithNULLS(tableName, "peakADU") … … 401 416 self.scratchDb.dropTable(tableName) 402 417 sql = "CREATE TABLE " + tableName + " LIKE SkinnyObject" 403 try: self.scratchDb. stmt.execute(sql)418 try: self.scratchDb.execute(sql) 404 419 except: pass 405 420 … … 415 430 ,surveyID \ 416 431 FROM Detection_" + ota 417 self.scratchDb. stmt.execute(sql)432 self.scratchDb.execute(sql) 418 433 419 434 self.scratchDb.updateAllRows(tableName, "dataRelease", str(self.dataRelease)) … … 429 444 self.scratchDb.dropTable(tableName) 430 445 sql = "CREATE TABLE " + tableName + " LIKE ObjectCalColor" 431 try: self.scratchDb. stmt.execute(sql)446 try: self.scratchDb.execute(sql) 432 447 except: pass 433 448 … … 443 458 ,filterID \ 444 459 FROM Detection_" + ota 445 self.scratchDb. stmt.execute(sql)460 self.scratchDb.execute(sql) 446 461 447 462 self.scratchDb.updateAllRows(tableName, "calibModNum", str(self.calibModNum)) … … 484 499 485 500 imageID = self.scratchDb.getImageIDFromExternID(sourceID, externID) 486 self.logger. info("Updating table '" + table + "' with DVO IDs using imageID = %d" % imageID)501 self.logger.debug("Updating table '" + table + "' with DVO IDs using imageID = %d" % imageID) 487 502 sql = "UPDATE IGNORE " + table + " AS a, " + self.scratchDb.dvoDetection + " AS b SET \ 488 503 a.ippObjID = b.ippObjID, \ … … 494 509 AND b.imageID = " + str(imageID) 495 510 496 self.scratchDb. stmt.execute(sql)511 self.scratchDb.execute(sql) 497 512 498 513 … … 511 526 512 527 # loop through all OTAs and populate ImageMeta extensions 528 self.logger.info("Reading all fits headers and populating ImageMeta tables") 513 529 for x in range(self.startX, self.endX): 514 530 for y in range(self.startY, self.endY): … … 524 540 # load corresponding header into memory 525 541 header = self.findAndReadFITSHeader(ota + ".hdr", file) 542 if not header: 543 self.logger.error("No header found for OTA " + ota) 544 continue 545 526 546 527 547 # store sourceID/imageID combo in Db so DVO can look up later … … 578 598 579 599 # update ImageMeta with count of detections for this OTA and photoCodeID 580 sql = "UPDATE ImageMeta_" + ota + " SET nDetect = %d, photoCalID = %d" % (self.scratchDb.getRowCount("Detection_" + ota), self.scratchDb.getPhotoCalID(sourceIDs[ota], imageIDs[ota])) 581 self.scratchDb.stmt.execute(sql) 600 sql = "UPDATE ImageMeta_" + ota + " \ 601 SET nDetect = %d, photoCalID = %d" % (self.scratchDb.getRowCount("Detection_" + ota), self.scratchDb.getPhotoCalID(sourceIDs[ota], imageIDs[ota])) 602 self.scratchDb.execute(sql) 582 603 583 604 self.populateSkinnyObjectTable(ota) … … 603 624 # update FrameMeta with count OTAs in this file and total number of photometric reference sources 604 625 sql = "UPDATE FrameMeta SET nOTA = %d, numPhotoRef = %d" % (otaCount, self.totalNumPhotoRef) 605 self.scratchDb. stmt.execute(sql)626 self.scratchDb.execute(sql) 606 627 607 628 return True … … 613 634 614 635 sql = "UPDATE " + tableName + " SET imageID = %d%d%d" % (self.expID, x, y) 615 self.scratchDb. 
stmt.execute(sql)636 self.scratchDb.execute(sql) 616 637 617 638 ''' … … 619 640 ''' 620 641 def alreadyProcessed(self): 642 643 meta = self.gpc1Db.getCameraStageMeta(self.id) 644 self.expID = meta[0]; 645 self.expName = meta[1]; 646 self.distGroup = meta[2]; 621 647 622 648 return self.ippToPspsDb.alreadyProcessed("detection", "exp_id", self.expID) … … 634 660 635 661 662 ''' 663 Overriding this method. Filter to only import *.psf extensions 664 ''' 665 def importIppTables(self, filter=""): 666 return super(DetectionBatch, self).importIppTables(".*.psf") 667 668 669 ''' 670 Overriding this method. Use regex to trim off, eg _XY33 extension 671 ''' 672 def exportPspsTablesToFits(self, regex="(.*)"): 673 return super(DetectionBatch, self).exportPspsTablesToFits("([a-zA-Z]+)") 674 675 676 # TODO put in config 677 useFullTables=True 678 testMode=False 679 636 680 logging.config.fileConfig("logging.conf") 637 681 logger = logging.getLogger("detectionbatch") 682 logger.setLevel(logging.INFO) 638 683 logger.info("Starting") 639 684 640 685 gpc1Db = Gpc1Db(logger) 686 ippToPspsDb = IppToPspsDb(logger) 687 scratchDb = ScratchDb(logger, useFullTables) 688 641 689 camIDs = gpc1Db.getIDsInThisDVODbForThisStage("MD04.V2", "cam") 642 690 logger.info("Found %d exposures" % len(camIDs)) … … 645 693 for camID in camIDs: 646 694 647 logger.info("-------------------------------------------------- cam ID: %d" % camID)695 #if camID < 43764: continue # TODO 648 696 649 697 file = gpc1Db.getCameraStageSmf(camID) 650 if not os.path.isfile(file): 651 logger.error("Cannot read file at '" + file) 652 continue 653 654 detectionBatch = DetectionBatch(logger, camID, file, False, True) 655 656 if not detectionBatch.alreadyProcessed(): 657 658 detectionBatch.createEmptyPspsTables() 659 detectionBatch.importIppTables(".*.psf") 660 if detectionBatch.populatePspsTables(): 661 detectionBatch.exportPspsTablesToFits("([a-zA-Z]+)") 662 detectionBatch.writeBatchManifest() 663 #detectionBatch.reportNullsInAllPspsTables(False) 664 #detectionBatch.createTarball() 665 #detectionBatch.publishToDatastore() 666 667 i = i+1 668 # if i > 0: sys.exit() 669 698 699 detectionBatch = DetectionBatch(logger, 700 gpc1Db, 701 ippToPspsDb, 702 scratchDb, 703 camID, 704 file, 705 testMode, 706 useFullTables) 707 detectionBatch.run() 708 -
branches/eam_branches/ipp-20110505/ippToPsps/jython/gpc1db.py
r31400 r31587 29 29 self.logger.debug("Gpc1Db destructor") 30 30 31 32 ''' 33 TODO 34 ''' 35 def getIDsInThisDVODbForThisStageFudge(self): 36 37 sql = "SELECT staticskyRun.sky_id \ 38 FROM staticskyInput, staticskyRun, stackRun, staticskyResult \ 39 WHERE staticskyRun.sky_id = staticskyInput.sky_id \ 40 AND staticskyInput.stack_id = stackRun.stack_id \ 41 AND staticskyInput.sky_id = staticskyResult.sky_id \ 42 AND staticskyRun.label like 'MD04.staticsky' \ 43 AND stackRun.filter like 'i%'" 44 45 try: 46 rs = self.executeQuery(sql) 47 except: 48 self.logger.exception("Can't query for ids in DVO") 49 50 ids = [] 51 while (rs.next()): 52 ids.append(rs.getInt(1)) 53 54 rs.close() 55 56 self.logger.info("Found %d items in DVO database '" % (len(ids))) 57 58 return ids 59 31 60 ''' 32 61 Gets a list of ids in this DVO database for this stage, could be cam or staticsky (so far) … … 40 69 41 70 try: 42 rs = self. stmt.executeQuery(sql)71 rs = self.executeQuery(sql) 43 72 except: 44 73 self.logger.exception("Can't query for ids in DVO") … … 50 79 rs.close() 51 80 52 self.logger. debug("Found %d items in DVO database '%s' for stage='%s'" % (len(ids), dvoDb, stage))81 self.logger.info("Found %d items in DVO database '%s' for stage='%s'" % (len(ids), dvoDb, stage)) 53 82 54 83 return ids … … 73 102 74 103 try: 75 rs = self. stmt.executeQuery(sql)104 rs = self.executeQuery(sql) 76 105 except: 77 106 self.logger.exception("Can't query for imageIDs") … … 105 134 106 135 try: 107 rs = self. stmt.executeQuery(sql)136 rs = self.executeQuery(sql) 108 137 rs.first() 109 138 meta.append(rs.getInt(1)) … … 113 142 114 143 return meta 144 115 145 ''' 116 146 Gets some camera-stage meta data for this cam_id … … 127 157 128 158 try: 129 rs = self. stmt.executeQuery(sql)159 rs = self.executeQuery(sql) 130 160 rs.first() 131 161 meta.append(rs.getInt(1)) … … 150 180 151 181 try: 152 rs = self. stmt.executeQuery(sql)182 rs = self.executeQuery(sql) 153 183 rs.first() 154 184 except: … … 171 201 files = glob.glob(path + "/*.cmf") 172 202 203 if len(files) < 1: return "NULL" 204 173 205 return files[0] # TODO just returning first file - check 174 206 … … 186 218 187 219 try: 188 rs = self. stmt.executeQuery(sql)220 rs = self.executeQuery(sql) 189 221 rs.first() 190 222 except: … … 200 232 201 233 f=os.popen("neb-ls -p "+path+"%cmf") 202 print "neb-ls -p "+path+"%cmf"203 234 for i in f.readlines(): 204 235 files.append(i.rstrip()) 205 print i.rstrip()206 236 207 237 # or not a neb path … … 211 241 return files 212 242 243 244 ''' 245 TODO hack to get exposure time for a stack 246 ''' 247 def getStackExpTime(self, stackID): 248 249 self.logger.debug("Querying GPC1 for stack exposure time") 250 251 sql = "SELECT SUM(exp_time) * (COUNT(warp_id) - reject_images) / COUNT(warp_id) as EXPTIME \ 252 FROM staticskyRun JOIN staticskyInput using(sky_id) \ 253 JOIN stackRun using(stack_id) \ 254 JOIN stackSumSkyfile using(stack_id) \ 255 JOIN stackInputSkyfile using(stack_id) \ 256 JOIN warpRun using(warp_id) \ 257 JOIN fakeRun using(fake_id) \ 258 JOIN camRun using(cam_id) \ 259 JOIN chipRun using(chip_id) \ 260 JOIN rawExp using(exp_id) \ 261 WHERE stack_id = %d" % stackID 262 263 try: 264 rs = self.executeQuery(sql) 265 rs.first() 266 return rs.getInt(1) 267 except: 268 self.logger.exception("Can't query for exposure time") 269 270 return 0.0 271 272 -
branches/eam_branches/ipp-20110505/ippToPsps/jython/ipptopspsdb.py
r31355 r31587 37 37 )" 38 38 39 self. stmt.execute(sql)39 self.execute(sql) 40 40 41 41 sql = "SELECT MAX(batch_id) FROM batch" … … 44 44 45 45 try: 46 rs = self. stmt.executeQuery(sql)46 rs = self.executeQuery(sql) 47 47 rs.first() 48 48 batchID = rs.getInt(1) … … 64 64 WHERE batch_id = " + str(batchID) 65 65 66 self. stmt.execute(sql)66 self.execute(sql) 67 67 68 68 ''' … … 75 75 WHERE batch_id = " + str(batchID) 76 76 77 self. stmt.execute(sql)77 self.execute(sql) 78 78 79 79 ''' … … 86 86 WHERE batch_id = " + str(batchID) 87 87 88 self. stmt.execute(sql)88 self.execute(sql) 89 89 90 90 ''' … … 101 101 102 102 try: 103 rs = self. stmt.executeQuery(sql)103 rs = self.executeQuery(sql) 104 104 rs.first() 105 105 if rs.getInt(1) > 0: … … 128 128 )" 129 129 130 self. stmt.execute(sql)130 self.execute(sql) 131 131 132 132 ''' … … 149 149 )" 150 150 151 self. stmt.execute(sql)151 self.execute(sql) 152 152 153 153 -
branches/eam_branches/ipp-20110505/ippToPsps/jython/mysql.py
r31401 r31587 37 37 self.url = "jdbc:mysql://"+self.dbHost+"/"+self.dbName+"?user="+self.dbUser+"&password="+self.dbPass 38 38 self.con = DriverManager.getConnection(self.url) 39 self.stmt = self.con.createStatement() 39 self.connectionID = self.getLastConnectionID() 40 self.logger.info("MySQL connection to %s with ID %d" % (dbType, self.connectionID)) 41 42 #self.stmt = self.con.createStatement() 43 44 45 ''' 46 Disconnect from database 47 ''' 48 def disconnect(self): 49 self.con.close() 40 50 41 51 ''' … … 45 55 46 56 self.logger.debug("MySql destructor") 47 self.stmt.close() 48 self.con.close() 57 self.disconnect() 58 59 ''' 60 Kills the last connection ID, so long as it's not THIS connection ID 61 ''' 62 def killLastConnectionID(self): 63 64 connectionID = self.getLastConnectionID() 65 if connectionID == self.connectionID: 66 self.logger.error("NOT going to kill THIS connection ID") 67 return 68 69 sql = "KILL %d" % connectionID 70 self.execute(sql) 71 72 ''' 73 Gets the last connection ID 74 ''' 75 def getLastConnectionID(self): 76 77 sql = "SELECT ID \ 78 FROM INFORMATION_SCHEMA.PROCESSLIST \ 79 WHERE DB='" + self.dbName + "' \ 80 ORDER BY ID" 81 rs = self.executeQuery(sql) 82 rs.last() 83 return rs.getInt(1) 49 84 50 85 ''' … … 54 89 55 90 sql = "UPDATE " + table + " SET " + column + " = " + value 56 self. stmt.execute(sql)91 self.execute(sql) 57 92 58 93 ''' … … 62 97 63 98 sql = "DROP TABLE " + table 64 try: self. stmt.execute(sql)99 try: self.execute(sql) 65 100 except: return 66 101 … … 74 109 sql = "ALTER TABLE " + table + " ADD UNIQUE (" + column + ")" 75 110 try: 76 self. stmt.execute(sql)111 self.execute(sql) 77 112 except: pass 78 113 #self.logger.warn("Index already in place on '" + column + "' for table '" + table + "'") … … 82 117 def createIndex(self, table, column): 83 118 84 self.logger.debug("Creating index on column '"+column+"' for table '"+table+"'")119 #self.logger.debug("Creating index on column '"+column+"' for table '"+table+"'") 85 120 86 121 sql = "CREATE INDEX "+table+"_"+column+"_index ON "+table+" ("+column+")" 87 122 try: 88 self. stmt.execute(sql)123 self.execute(sql) 89 124 except: pass 90 125 #self.logger.warn("Index already in place on '" + column + "' for table '" + table + "'") 126 ''' 127 TODO 128 ''' 129 def execute(self, sql): 130 131 stmt = self.con.createStatement() 132 stmt.execute(sql) 133 stmt.close() 134 135 ''' 136 TODO 137 ''' 138 def executeQuery(self, sql): 139 140 stmt = self.con.createStatement() 141 rs = stmt.executeQuery(sql) 142 #stmt.close() 143 return rs 91 144 92 145 ''' … … 96 149 97 150 sql = "SHOW COLUMNS FROM " + tableName 98 rs = self. stmt.executeQuery(sql)151 rs = self.executeQuery(sql) 99 152 columns = [] 100 153 while (rs.next()): columns.append(rs.getString(1)) … … 109 162 110 163 sql = "UPDATE " + tableName + " SET " + column + " = " + sub + " WHERE " + column + " IS NULL" 111 self. stmt.execute(sql)164 self.execute(sql) 112 165 113 166 ''' … … 123 176 124 177 sql = "UPDATE " + tableName + " SET " + column + " = " + sub + " WHERE " + column + " IS NULL" 125 self. stmt.execute(sql)178 self.execute(sql) 126 179 127 180 ''' … … 131 184 132 185 sql = "SELECT COUNT(*) FROM " + tableName + " WHERE " + columnName + " = " + value 133 rs = self. stmt.executeQuery(sql)186 rs = self.executeQuery(sql) 134 187 rs.first() 135 nBad Flux= rs.getInt(1)136 self.logger.info("% d NULL %s values in table %s. Deleting." % (nBadFlux, columnName, tableName))188 nBad = rs.getInt(1) 189 self.logger.info("%5d NULL %s values in table %s. 
Deleting." % (nBad, columnName, tableName)) 137 190 138 191 sql="DELETE from " + tableName + " WHERE " + columnName + " = " + value 139 self. stmt.execute(sql)192 self.execute(sql) 140 193 141 194 ''' … … 145 198 146 199 sql = "SELECT COUNT(*) FROM " + tableName + " WHERE " + columnName + " IS NULL" 147 rs = self. stmt.executeQuery(sql)200 rs = self.executeQuery(sql) 148 201 rs.first() 149 nBad Flux= rs.getInt(1)150 self.logger.info("% d NULL %s values in table %s. Deleting." % (nBadFlux, columnName, tableName))202 nBad = rs.getInt(1) 203 self.logger.info("%5d NULL %s values in table %s. Deleting." % (nBad, columnName, tableName)) 151 204 152 205 sql="DELETE from " + tableName + " WHERE " + columnName + " IS NULL" 153 self. stmt.execute(sql)206 self.execute(sql) 154 207 155 208 ''' … … 160 213 # first, count rows 161 214 sql = "SELECT COUNT(*) FROM " + tableName 162 rs = self. stmt.executeQuery(sql)215 rs = self.executeQuery(sql) 163 216 rs.first() 164 217 numRows = rs.getInt(1) … … 175 228 176 229 sql = "SELECT COUNT(*) FROM " + tableName + " WHERE " + column + " IS NULL" 177 rs = self. stmt.executeQuery(sql)230 rs = self.executeQuery(sql) 178 231 rs.first() 179 232 if rs.getInt(1) == numRows: … … 191 244 sql = "SELECT COUNT(*) FROM " + table 192 245 try: 193 rs = self. stmt.executeQuery(sql)246 rs = self.executeQuery(sql) 194 247 rs.first() 195 248 return rs.getInt(1) -
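The mysql.py changes above drop the single long-lived Statement in favour of per-call execute()/executeQuery() helpers, each of which creates and closes its own java.sql Statement, and add killLastConnectionID(), apparently to reap connections left open by the STILTS table writes seen in batch.py. Below is a minimal Jython sketch of the per-call helper pattern, assuming a JDBC URL is available; the wrapper class name is hypothetical and this is not the module's full API.

from java.sql import DriverManager  # java.sql is available directly under Jython

class SqlHelper(object):
    """Illustrative wrapper: one short-lived Statement per call, as in the refactor."""

    def __init__(self, url):
        self.con = DriverManager.getConnection(url)

    def execute(self, sql):
        stmt = self.con.createStatement()
        stmt.execute(sql)
        stmt.close()                          # nothing is reused between calls

    def executeQuery(self, sql):
        stmt = self.con.createStatement()
        return stmt.executeQuery(sql)         # caller iterates and closes the ResultSet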
branches/eam_branches/ipp-20110505/ippToPsps/jython/scratchdb.py
r31399 r31587 45 45 sql = "SELECT surveyID FROM Survey WHERE name = '" + name + "'" 46 46 try: 47 rs = self. stmt.executeQuery(sql)47 rs = self.executeQuery(sql) 48 48 rs.first() 49 49 return rs.getInt(1) … … 61 61 sql = "SELECT flags FROM " + self.dvoMeta + " WHERE sourceID = %s AND externID = %s" % (sourceID, externID) 62 62 try: 63 rs = self. stmt.executeQuery(sql)63 rs = self.executeQuery(sql) 64 64 rs.first() 65 65 flags = rs.getInt(1) … … 78 78 sql = "SELECT imageID FROM " + self.dvoMeta + " WHERE sourceID = %s AND externID = %s" % (sourceID, externID) 79 79 try: 80 rs = self. stmt.executeQuery(sql)80 rs = self.executeQuery(sql) 81 81 rs.first() 82 82 imageID = rs.getInt(1) … … 95 95 sql = "SELECT photcode FROM " + self.dvoMeta + " WHERE sourceID = %s AND externID = %s" % (sourceID, externID) 96 96 try: 97 rs = self. stmt.executeQuery(sql)97 rs = self.executeQuery(sql) 98 98 rs.first() 99 99 photcode = rs.getInt(1) … … 109 109 110 110 sql = "UPDATE "+table+" AS a, Filter AS b SET a.filterID=b.filterID WHERE b.filterType = '" + filter + "'" 111 self. stmt.execute(sql)111 self.execute(sql) 112 112 113 113 ''' … … 123 123 " + str(imageID) + " \ 124 124 )" 125 self. stmt.execute(sql)125 self.execute(sql) 126 126 127 127 ''' … … 131 131 132 132 sql = "INSERT INTO dvoDone (name) VALUES ('" + name + "')" 133 self. stmt.execute(sql)133 self.execute(sql) 134 134 135 135 ''' … … 141 141 142 142 try: 143 rs = self. stmt.executeQuery(sql)143 rs = self.executeQuery(sql) 144 144 rs.first() 145 145 if rs.getInt(1) > 0: … … 160 160 161 161 sql = "DROP TABLE dvoMeta" 162 try: self. stmt.execute(sql)162 try: self.execute(sql) 163 163 except: pass 164 164 165 165 sql = "DROP TABLE dvoDetection" 166 try: self. stmt.execute(sql)166 try: self.execute(sql) 167 167 except: pass 168 168 … … 175 175 )" 176 176 177 try: self. stmt.execute(sql)177 try: self.execute(sql) 178 178 except: 179 179 self.logger.error("Unable to create DVO meta-data database table") … … 193 193 #INDEX (ippDetectID) \ 194 194 195 try: self. stmt.execute(sql)195 try: self.execute(sql) 196 196 except: 197 197 self.logger.error("Unable to create DVO detection database table") -
branches/eam_branches/ipp-20110505/ippToPsps/jython/stackbatch.py
r31402 r31587 8 8 from java.sql import * 9 9 10 from batch import Batch 10 11 from gpc1db import Gpc1Db 11 from batch import Batch 12 from ipptopspsdb import IppToPspsDb 13 from scratchdb import ScratchDb 14 12 15 import logging.config 13 16 … … 20 23 Constructor 21 24 ''' 22 def __init__(self, logger, skyID, inputFile, stackType, useFullTables=False): 25 def __init__(self, 26 logger, 27 gpc1Db, 28 ippToPspsDb, 29 scratchDb, 30 skyID, 31 inputFile, 32 stackType, 33 useFullTables=False): 34 23 35 super(StackBatch, self).__init__( 24 36 logger, 37 gpc1Db, 38 ippToPspsDb, 39 scratchDb, 40 skyID, 25 41 "stack", 26 42 inputFile, … … 28 44 useFullTables) # TODO 29 45 30 self.logger.info("StackBatch constructor. Creating batch from: '" + inputFile + "'") 31 32 self.skyID = skyID 33 34 # get filterID using init table 35 self.filter = self.header['FPA.FILTER'] 36 self.filter = self.filter[0:1] 37 38 self.stackType = stackType 39 meta = self.gpc1Db.getStackStageMeta(self.skyID, self.header['FPA.FILTER']) 40 if len(meta) < 1: return 41 self.stackID = meta[0]; 42 self.skycell = meta[1]; 43 44 # determine skycell from header value 45 #self.skycell = "skycell.34" #= self.header['SKYCELL'] 46 self.skycell = self.skycell[8:] 47 48 self.logger.info("Processing stack with ID: %d, type: %s and skycell: %s filter: %s" % (self.stackID, self.stackType, self.skycell, self.filter)) 49 46 if not self.everythingOK: return 47 48 self.expTime = gpc1Db.getStackExpTime(self.stackID) 49 50 self.logger.info("got exp time of %d" % self.expTime) 51 52 # meta data to the log 53 self.logger.info("New Stack Batch:") 54 self.logger.info("Sky ID: %d" % self.id) 55 self.logger.info("File: %s" % inputFile) 56 self.logger.info("Stack ID: %d" % self.stackID) 57 self.logger.info("Stack type: %s" % self.stackType) 58 self.logger.info("Skycell: %s" % self.skycell) 59 self.logger.info("Filter: %s" % self.filter) 50 60 51 61 # delete PSPS tables … … 58 68 self.scratchDb.dropTable("ObjectCalColor") 59 69 60 # delete IPP tables61 #self.scratchDb.dropTable("SkyChip_psf")62 #self.scratchDb.dropTable("SkyChip_xsrc")63 #self.scratchDb.dropTable("SkyChip_xfit")64 #self.scratchDb.dropTable("SkyChip_xrad")65 66 self.logger.info("Stack type: " + self.safeDictionaryAccess(self.header, self.stackType))67 # obs time makes no sense except for nightly stacks68 #if self.header['STK_TYPE'] != "NIGHTLY_STACK": self.header['MJD-OBS'] = "-999"69 70 70 # create an output filename, which is {filterID}{skycellID}.FITS 71 self.outputFitsFile = "% s%07d.FITS" % (self.filter, int(self.skycell))71 self.outputFitsFile = "%08d.FITS" % self.stackID 72 72 self.outputFitsPath = "%s/%s" % (self.localOutPath, self.outputFitsFile) 73 73 … … 77 77 78 78 # insert what we know about this stack batch into the stack table 79 self.ippToPspsDb.insertStackMeta(self.batchID, self. skyID, self.stackID, self.filter, self.stackType)79 self.ippToPspsDb.insertStackMeta(self.batchID, self.id, self.stackID, self.filter, self.stackType) 80 80 81 81 # insert sourceID/imageID combo so DVO can look it up … … 89 89 90 90 sql = "UPDATE " + table + " SET stackMetaID=" + str(self.stackID) 91 self.scratchDb. stmt.execute(sql)91 self.scratchDb.execute(sql) 92 92 93 93 ''' … … 97 97 98 98 sql = "UPDATE "+table+" AS a, StackType AS b SET a.stackTypeID=b.stackTypeID WHERE b.name = '" + self.stackType + "'" 99 self.scratchDb. stmt.execute(sql)99 self.scratchDb.execute(sql) 100 100 101 101 … … 148 148 WHERE a.ippDetectID=b.IPP_IDET AND b.PSF_FWHM "+psfCondition 149 149 150 self.scratchDb. 
stmt.execute(sql)150 self.scratchDb.execute(sql) 151 151 152 152 ''' … … 196 196 WHERE a.ippDetectID=b.IPP_IDET AND b.MODEL_TYPE = '"+ippModelType+"'" 197 197 198 self.scratchDb. stmt.execute(sql)198 self.scratchDb.execute(sql) 199 199 200 200 # sersic fit has an extra parameter … … 213 213 WHERE a.ippDetectID=b.IPP_IDET AND b.MODEL_TYPE = '"+ippModelType+"'" 214 214 215 self.scratchDb. stmt.execute(sql)215 self.scratchDb.execute(sql) 216 216 217 217 … … 220 220 ''' 221 221 def populateStackMeta(self): 222 222 223 self.logger.info("Procesing StackMeta table") 223 224 … … 246 247 ," + str(self.scratchDb.getPhotoCalID(self.header['SOURCEID'], self.header['IMAGEID'])) + " \ 247 248 ," + self.header['FPA.ZP'] + " \ 248 ," + s elf.header['EXPTIME']+ " \249 ," + str(self.expTime) + " \ 249 250 ,'" + self.safeDictionaryAccess(self.header, 'PSFMODEL') + "' \ 250 251 ,'" + self.header['CTYPE1'] + "' \ … … 261 262 ," + self.header['PC002002'] + " \ 262 263 )" 263 self.scratchDb. stmt.execute(sql)264 self.scratchDb.execute(sql) 264 265 265 266 self.scratchDb.updateAllRows("StackMeta", "surveyID", str(self.surveyID)) … … 272 273 ''' 273 274 def populateStackDetection(self): 275 274 276 self.logger.info("Procesing StackDetection table") 275 277 … … 321 323 ,X_PSF_SIG \ 322 324 ,Y_PSF_SIG \ 323 ,POW(10.0, (-0.4*PSF_INST_MAG)) / "+s elf.header['EXPTIME']+" \324 ,ABS((PSF_INST_MAG_SIG*(POW(10.0, (-0.4*PSF_INST_MAG)) / "+s elf.header['EXPTIME']+")) / 1.085736) \325 ,POW(10.0, (-0.4*PEAK_FLUX_AS_MAG)) / "+s elf.header['EXPTIME']+" \325 ,POW(10.0, (-0.4*PSF_INST_MAG)) / "+str(self.expTime)+" \ 326 ,ABS((PSF_INST_MAG_SIG*(POW(10.0, (-0.4*PSF_INST_MAG)) / "+str(self.expTime)+")) / 1.085736) \ 327 ,POW(10.0, (-0.4*PEAK_FLUX_AS_MAG)) / "+str(self.expTime)+" \ 326 328 ,SKY \ 327 329 ,SKY_SIGMA \ … … 352 354 FROM SkyChip_psf" 353 355 354 self.scratchDb. stmt.execute(sql)356 self.scratchDb.execute(sql) 355 357 356 358 self.scratchDb.updateAllRows("StackDetection", "surveyID", str(self.surveyID)) … … 362 364 self.updateStackTypeID("StackDetection") 363 365 self.updateDvoIDs("StackDetection") 364 365 # now delete bad flux 366 sql = "ALTER TABLE StackDetection ADD PRIMARY KEY (objID, stackDetectID)" 367 self.scratchDb.execute(sql) 368 369 if self.stackType == "DEEP_STACK": 370 371 #if deep stack and instFlux = null and err not null 372 sql = "UPDATE StackDetection AS a, SkyChip_psf AS b \ 373 SET instFlux = 2*b.PSF_INST_FLUX_SIG \ 374 WHERE instFlux IS NULL \ 375 AND b.PSF_INST_FLUX_SIG IS NOT NULL" 376 self.scratchDb.execute(sql) 377 # instFlux = 2*PSF_INST_FLUX_SIG 378 366 379 self.scratchDb.reportAndDeleteRowsWithNULLS("StackDetection", "instFlux") 367 380 self.scratchDb.reportAndDeleteRowsWithNULLS("StackDetection", "objID") … … 372 385 ''' 373 386 def populateStackApFlx(self): 387 374 388 self.logger.info("Procesing StackApFlx table") 375 389 … … 381 395 382 396 try: 383 self.scratchDb. 
stmt.execute(sql)397 self.scratchDb.execute(sql) 384 398 except: return 385 399 386 400 # TODO temporarily loading 1st convolved fluxes into unconvolved fields 387 self.logger.info(" Adding un-convolved fluxes")401 self.logger.info("Adding un-convolved fluxes") 388 402 self.updateApFlxs("", "< 7.0") 389 self.logger.info(" Adding 1st convolved fluxes")403 self.logger.info("Adding 1st convolved fluxes") 390 404 self.updateApFlxs("c1", "< 7.0") 391 self.logger.info(" Adding 2nd convolved fluxes")405 self.logger.info("Adding 2nd convolved fluxes") 392 406 self.updateApFlxs("c2", "> 7.0") 393 407 394 self.logger.info(" Adding petrosians for extended sources")408 self.logger.info("Adding petrosians for extended sources") 395 409 sql = "UPDATE StackApFlx AS a, SkyChip_xsrc AS b SET \ 396 410 petRadius=b.PETRO_RADIUS \ … … 403 417 ,petR90Err=b.PETRO_RADIUS_90_ERR \ 404 418 WHERE a.ippDetectID=b.IPP_IDET" 405 self.scratchDb. stmt.execute(sql)419 self.scratchDb.execute(sql) 406 420 407 421 self.scratchDb.updateAllRows("StackApFlx", "surveyID", str(self.surveyID)) … … 413 427 self.updateStackTypeID("StackApFlx") 414 428 self.updateDvoIDs("StackApFlx") 429 self.scratchDb.reportAndDeleteRowsWithNULLS("StackApFlx", "objID") 430 self.deleteDetectionsNotInStackDetection("StackApFlx") 431 415 432 416 433 ''' … … 418 435 ''' 419 436 def populateStackModelFit(self): 437 420 438 self.logger.info("Procesing StackModelFit table") 421 439 … … 423 441 sql = "INSERT INTO StackModelFit (ippDetectID) SELECT DISTINCT IPP_IDET from SkyChip_xfit" 424 442 try: 425 self.scratchDb. stmt.execute(sql)443 self.scratchDb.execute(sql) 426 444 except: 427 445 return 428 446 429 430 447 # populate model parameters 431 self.logger.info(" Adding deVaucouleurs fit")448 self.logger.info("Adding deVaucouleurs fit") 432 449 self.updateModelFit("deV", "PS_MODEL_DEV") 433 self.logger.info(" Adding exponential fit")450 self.logger.info("Adding exponential fit") 434 451 self.updateModelFit("exp", "PS_MODEL_EXP") 435 self.logger.info(" Adding sersic fit")452 self.logger.info("Adding sersic fit") 436 453 self.updateModelFit("ser", "PS_MODEL_SERSIC") 437 454 … … 444 461 self.updateStackTypeID("StackModelFit") 445 462 self.updateDvoIDs("StackModelFit") 463 self.scratchDb.reportAndDeleteRowsWithNULLS("StackModelFit", "objID") 464 self.deleteDetectionsNotInStackDetection("StackModelFit") 465 466 ''' 467 Reports and deletes detections in this table that are not in StackDetection 468 ''' 469 def deleteDetectionsNotInStackDetection(self, table): 470 471 sql = "SELECT COUNT(*) FROM " + table + " WHERE ippDetectID NOT IN (SELECT ippDetectID FROM StackDetection)" 472 rs = self.scratchDb.executeQuery(sql) 473 rs.first() 474 nMissing = rs.getInt(1) 475 self.logger.info("%5d detections in %s table that are not in StackDetection. Deleting" % (nMissing, table)) 476 477 if nMissing < 1: return 478 479 sql = "DELETE FROM " + table + " WHERE ippDetectID NOT IN (SELECT ippDetectID FROM StackDetection)" 480 self.scratchDb.execute(sql) 481 446 482 447 483 ''' … … 449 485 ''' 450 486 def populateStackToImage(self): 487 451 488 self.logger.info("Procesing StackToImage table") 452 489 … … 457 494 VALUES (\ 458 495 " + str(self.stackID) + ", " + imageID + ")" 459 self.scratchDb. stmt.execute(sql)496 self.scratchDb.execute(sql) 460 497 461 498 # now update StackMeta with correct number of inputs 462 499 sql = "UPDATE StackMeta SET nP2Images = (SELECT COUNT(*) FROM StackToImage)" 463 self.scratchDb. 
stmt.execute(sql)500 self.scratchDb.execute(sql) 464 501 465 502 ''' … … 467 504 ''' 468 505 def populateSkinnyObject(self): 506 469 507 self.logger.info("Procesing SkinnyObject table") 470 508 … … 474 512 ) \ 475 513 SELECT \ 476 objID \514 DISTINCT objID \ 477 515 ,ippObjID \ 478 516 FROM StackDetection" 479 self.scratchDb. stmt.execute(sql)517 self.scratchDb.execute(sql) 480 518 481 519 self.scratchDb.updateAllRows("SkinnyObject", "surveyID", str(self.surveyID)) … … 486 524 ''' 487 525 def populateObjectCalColor(self): 526 488 527 self.logger.info("Procesing ObjectCalColor table") 489 528 … … 493 532 ) \ 494 533 SELECT \ 495 objID \534 DISTINCT objID \ 496 535 ,ippObjID \ 497 536 FROM StackDetection" 498 self.scratchDb. stmt.execute(sql)537 self.scratchDb.execute(sql) 499 538 500 539 self.scratchDb.updateFilterID("ObjectCalColor", self.filter) … … 508 547 509 548 self.logger.info("Altering PSPS tables") 510 self.scratchDb.makeColumnUnique("StackDetection", "objID")549 #self.scratchDb.makeColumnUnique("StackDetection", "objID") 511 550 self.scratchDb.createIndex("StackDetection", "ippDetectID") 512 551 self.scratchDb.createIndex("StackApFlx", "ippDetectID") … … 531 570 imageID = self.scratchDb.getImageIDFromExternID(self.header['SOURCEID'], self.header['IMAGEID']) 532 571 533 self.logger. info("Updating table '" + table + "' with DVO IDs...")572 self.logger.debug("Updating table '" + table + "' with DVO IDs...") 534 573 sql = "UPDATE IGNORE " + table + " AS a, dvoDetectionFull AS b SET \ 535 574 a.ippObjID = b.ippObjID, \ … … 539 578 AND b.sourceID = " + self.header['SOURCEID'] + "\ 540 579 AND b.imageID = " + str(imageID) 541 self.scratchDb.stmt.execute(sql) 542 580 self.scratchDb.execute(sql) 543 581 544 582 ''' … … 553 591 self.populateStackMeta() 554 592 self.populateStackDetection() 555 self.populateStackModelFit() 556 self.populateStackApFlx() 593 594 if self.stackType != "NIGHTLY_STACK": 595 self.populateStackModelFit() 596 self.populateStackApFlx() 597 557 598 self.populateStackToImage() 558 599 self.populateSkinnyObject() … … 560 601 561 602 self.setMinMaxObjID(["StackDetection"]) 562 603 604 if self.totalDetections < 1: 605 606 self.logger.error("No detections to publish") 607 return False 608 563 609 return True 564 610 … … 568 614 def alreadyProcessed(self): 569 615 570 return self.ippToPspsDb.alreadyProcessed("stack", "stack_id", self.stackID) 616 # sadly, we have to read the FITS primary header first 617 if not self.readPrimaryHeader(): return False 618 619 # get filterID using init table 620 self.filter = self.header['FPA.FILTER'] 621 self.filter = self.filter[0:1] 622 623 self.stackType = stackType 624 meta = self.gpc1Db.getStackStageMeta(self.id, self.header['FPA.FILTER']) 625 if len(meta) < 1: return False 626 self.stackID = meta[0]; 627 self.skycell = meta[1]; 628 self.skycell = self.skycell[8:] 629 630 #return self.ippToPspsDb.alreadyProcessed("stack", "stack_id", self.stackID) 631 return False # TODOI 632 633 634 useFullTables=True 571 635 572 636 logging.config.fileConfig("logging.conf") 573 637 logger = logging.getLogger("stackbatch") 638 logger.setLevel(logging.INFO) 574 639 logger.info("Starting") 640 575 641 gpc1Db = Gpc1Db(logger) 576 stackType = "NIGHTLY_STACK" 577 skyIDs = gpc1Db.getIDsInThisDVODbForThisStage("MD04.Staticsky", "staticsky") 578 #skyIDs = gpc1Db.getIDsInThisDVODbForThisStage("MD04.GENE.PSPSDEEP", "staticsky") 579 #stackType = "DEEP_STACK" 580 #skyIDs = [689] 642 ippToPspsDb = IppToPspsDb(logger) 643 scratchDb = ScratchDb(logger, useFullTables) 
644 645 #stackType = "NIGHTLY_STACK" 646 #skyIDs = gpc1Db.getIDsInThisDVODbForThisStageFudge() 647 #skyIDs = gpc1Db.getIDsInThisDVODbForThisStage("MD04.Staticsky", "staticsky") 648 649 stackType = "DEEP_STACK" 650 skyIDs = gpc1Db.getIDsInThisDVODbForThisStage("MD04.GENE.PSPSDEEP", "staticsky") 651 652 #skyIDs = [942] 581 653 #skyIDs = [299] 582 654 #skyIDs = [302] 583 655 #skyIDs = [8508] 584 i = 0656 #i = 0 585 657 for skyID in skyIDs: 586 587 logger.info("-------------------------------------------------- sky ID: %d" % skyID) 658 659 #if skyID < 1340: continue # nightly 660 #if skyID < 238: continue # deep 588 661 589 662 cmfFiles = gpc1Db.getStackStageCmfs(skyID) … … 591 664 for file in cmfFiles: 592 665 593 if not os.path.isfile(file): 594 logger.error("Cannot read file at '" + file) 595 continue 596 597 stackBatch = StackBatch(logger, skyID, file, stackType, True) 598 599 if not stackBatch.alreadyProcessed(): 600 601 stackBatch.createEmptyPspsTables() 602 stackBatch.importIppTables("") 603 if stackBatch.populatePspsTables(): 604 605 #stackBatch.reportNullsInAllPspsTables(False) 606 stackBatch.exportPspsTablesToFits() 607 stackBatch.writeBatchManifest() 608 #stackBatch.createTarball() 609 #stackBatch.publishToDatastore() 610 611 i = i + 1 612 #if i > 0: sys.exit() 613 614 logger.info("Finished") 666 stackBatch = StackBatch(logger, 667 gpc1Db, 668 ippToPspsDb, 669 scratchDb, 670 skyID, 671 file, 672 stackType, 673 useFullTables) 674 675 stackBatch.run() 676 -
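One behavioural change in stackbatch.py worth calling out: populateStackApFlx() and populateStackModelFit() now finish with the new deleteDetectionsNotInStackDetection() helper, which reports and removes any row whose ippDetectID has no surviving partner in StackDetection (detections dropped there for NULL instFlux/objID would otherwise leave orphans). A minimal standalone sketch of that count-then-delete pattern; sqlite3 and the toy rows are purely illustrative, the real code issues the equivalent SQL through the scratch-database wrapper:

import sqlite3

def delete_orphan_detections(conn, table, parent="StackDetection", key="ippDetectID"):
    """Report and remove rows in `table` whose key has no match in `parent`."""
    cur = conn.cursor()
    cur.execute("SELECT COUNT(*) FROM %s WHERE %s NOT IN (SELECT %s FROM %s)"
                % (table, key, key, parent))
    n_missing = cur.fetchone()[0]
    print("%5d detections in %s that are not in %s. Deleting" % (n_missing, table, parent))
    if n_missing < 1:
        return 0
    cur.execute("DELETE FROM %s WHERE %s NOT IN (SELECT %s FROM %s)"
                % (table, key, key, parent))
    conn.commit()
    return n_missing

# toy data: one orphan row in StackApFlx
conn = sqlite3.connect(":memory:")
conn.executescript("""
    CREATE TABLE StackDetection (ippDetectID INTEGER);
    CREATE TABLE StackApFlx     (ippDetectID INTEGER);
    INSERT INTO StackDetection VALUES (1), (2);
    INSERT INTO StackApFlx     VALUES (1), (2), (3);
""")
delete_orphan_detections(conn, "StackApFlx")   # removes the row with ippDetectID = 3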
branches/eam_branches/ipp-20110505/ippTools/share/laptool_definerun.sql
r31435 r31587
1  SELECT want.exp_id, have.chip_id, false as private, true as active, false as pairwise
1  SELECT DISTINCT want.exp_id, have.chip_id, false as private, true as active, false as pairwise
2 2 FROM
3 3 (SELECT exp_id FROM rawExp
…
8 8 ) AS want
9 9 LEFT JOIN
10  (SELECT *
10  (SELECT exp_id,MAX(chip_id) AS chip_id
11 11 FROM lapExp
12 12 where private IS FALSE
-
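The reworked laptool_definerun.sql now asks for DISTINCT rows and collapses the reusable lapExp entries to one chip_id per exposure with MAX(chip_id), so an exposure that already has several non-private chip runs contributes a single candidate row instead of one per run. A toy illustration of the difference; sqlite3 and the rows are invented, and the GROUP BY exp_id implied by MAX() presumably sits in the part of the query not shown in this hunk:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript("""
    CREATE TABLE lapExp (exp_id INTEGER, chip_id INTEGER, private INTEGER);
    -- exposure 42 was processed twice; only one chip run should be reused
    INSERT INTO lapExp VALUES (42, 100, 0), (42, 101, 0), (43, 200, 0);
""")

# old behaviour: SELECT * yields two rows for exp_id 42
old = conn.execute("SELECT exp_id, chip_id FROM lapExp WHERE private = 0").fetchall()

# new behaviour: one row per exposure, keeping the highest chip_id
new = conn.execute("""
    SELECT exp_id, MAX(chip_id) AS chip_id
    FROM lapExp WHERE private = 0 GROUP BY exp_id
""").fetchall()

print(old)   # [(42, 100), (42, 101), (43, 200)]
print(new)   # [(42, 101), (43, 200)]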
branches/eam_branches/ipp-20110505/ippTools/share/laptool_exposures.sql
r31435 r31587 1 SELECT DISTINCT 2 D.*,diffRun.state, 3 coalesce(CONVERT(sum(others.private),SIGNED),0) AS needs_remade 4 -- 0 AS needs_remade 5 FROM ( 6 SELECT DISTINCT 7 W.*,CONVERT(IFNULL(diff1.diff_id,diff2.diff_id),SIGNED) AS diff_id FROM ( 8 SELECT DISTINCT 9 lap_id,lapRun.tess_id,projection_cell,filter,lapRun.state as lapRun_state, lapRun.registered, lapRun.fault, lapRun.label, lapRun.dist_group, 10 lapExp.exp_id,lapExp.chip_id,lapExp.pair_id,private,pairwise,active,lapExp.data_state, 11 chipRun.state as chipRun_state, 12 coalesce(CONVERT(sum(chipProcessedImfile.fault),SIGNED),0) as chip_faults, 13 coalesce(CONVERT(sum(chipProcessedImfile.quality),SIGNED),0) as chip_quality, 14 camRun.cam_id, camRun.state as camRun_state, 15 coalesce(CONVERT(sum(camProcessedExp.fault),SIGNED),0) AS cam_faults, 16 coalesce(CONVERT(sum(camProcessedExp.quality),SIGNED),0) AS cam_quality, 17 fakeRun.fake_id, fakeRun.state as fakeRun_state, 18 coalesce(CONVERT(sum(fakeProcessedImfile.fault),SIGNED),0) as fake_faults, 19 warpRun.warp_id, warpRun.state as warpRun_state, 20 coalesce(CONVERT(sum(warpSkyfile.fault),SIGNED),0) as warp_faults, 21 coalesce(CONVERT(sum(warpSkyfile.quality),SIGNED),0) as warp_quality, 22 warpRun.magicked 23 FROM lapRun JOIN lapExp USING(lap_id) 24 LEFT JOIN chipRun USING(chip_id) 25 LEFT JOIN chipProcessedImfile USING(chip_id) 26 LEFT JOIN camRun USING(chip_id) LEFT JOIN camProcessedExp USING(cam_id) 27 LEFT JOIN fakeRun USING(cam_id) LEFT JOIN fakeProcessedImfile USING(fake_id) 28 LEFT JOIN warpRun USING(fake_id) LEFT JOIN warpSkyfile USING(warp_id) 29 WHERE @WHERE@ 30 AND (warpSkyfile.quality IS NULL OR 31 (warpSkyfile.quality != 8007 -- known cases where quality != 0, but everything's fine. 32 AND warpSkyfile.quality != 3006 -- known cases where quality != 0, but everything's fine. 33 )) 34 GROUP BY lap_id,exp_id 35 ) AS W 36 -- This was unreasonably slow in testing, so that's why I'm using a subquery here. 
37 LEFT JOIN diffInputSkyfile AS diff1 ON (W.warp_id = diff1.warp1) 38 LEFT JOIN diffInputSkyfile AS diff2 ON (W.warp_id = diff2.warp2) 39 ) AS D 1 select DISTINCT V3.*, 2 diffRun.diff_id,diffRun.state as diffRun_state, 3 coalesce(CONVERT(sum(diffSkyfile.quality != 0),SIGNED),0) AS diff_bad_quality, 4 coalesce(CONVERT(count(diffSkyfile.diff_id),SIGNED),0) AS diff_component_count 5 FROM 6 ( select V2.*, 7 warpRun.warp_id,warpRun.state as warpRun_state, 8 coalesce(CONVERT(sum(warpSkyfile.quality != 0),SIGNED),0) AS warp_bad_quality, 9 coalesce(CONVERT(count(warpSkyfile.warp_id),SIGNED),0) AS warp_component_count, 10 warpRun.magicked 11 FROM 12 ( select V1.*, 13 camRun.cam_id,camRun.state as camRun_state, 14 coalesce(CONVERT(sum(camProcessedExp.quality != 0),SIGNED),0) AS cam_bad_quality, 15 coalesce(CONVERT(count(camProcessedExp.cam_id),SIGNED),0) AS cam_component_count, 16 fakeRun.fake_id,fakeRun.state as fakeRun_state FROM 17 ( SELECT DISTINCT 18 lap_id,lapRun.tess_id,projection_cell,filter,lapRun.state as lapRun_state, lapRun.registered, lapRun.fault, lapRun.label, lapRun.dist_group, 19 lapExp.exp_id,lapExp.chip_id,lapExp.pair_id,private,pairwise,active,lapExp.data_state, 20 chipRun.state as chipRun_state, 21 coalesce(CONVERT(sum(chipProcessedImfile.quality != 0),SIGNED),0) AS chip_bad_quality, 22 coalesce(CONVERT(count(chipProcessedImfile.chip_id),SIGNED),0) AS chip_component_count 23 FROM lapRun JOIN lapExp USING(lap_id) 24 LEFT JOIN chipRun USING(chip_id) LEFT JOIN chipProcessedImfile USING(chip_id) 25 WHERE @WHERE@ 26 GROUP BY lap_id,exp_id 27 ) AS V1 28 LEFT JOIN camRun USING(chip_id) LEFT JOIN camProcessedExp USING(cam_id) 29 LEFT JOIN fakeRun USING(cam_id) 30 GROUP BY lap_id,exp_id 31 ) AS V2 32 LEFT JOIN warpRun USING(fake_id) LEFT JOIN warpSkyfile USING(warp_id) 33 GROUP BY lap_id,exp_id 34 ) AS V3 35 LEFT JOIN 36 (SELECT DISTINCT diff_id,warp1,warp2 FROM diffInputSkyfile) AS DI ON 37 (DI.warp1 = warp_id OR DI.warp2 = warp_id) 40 38 LEFT JOIN diffRun USING(diff_id) 41 LEFT JOIN lapExp AS others ON (D.chip_id = others.chip_id AND D.lap_id != others.lap_id)39 LEFT JOIN diffSkyfile USING(diff_id) 42 40 GROUP BY lap_id,exp_id 43 41 -
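The rewritten laptool_exposures.sql builds the listing stage by stage: V1 aggregates the chip components, V2 layers on cam/fake, V3 layers on warp, and only then are the diff tables joined, with each level re-grouped on (lap_id, exp_id). The apparent point of the nesting is to keep the per-stage counts (chip_bad_quality, warp_component_count, and so on) from being inflated by the row fan-out that a single flat join across several one-to-many tables produces. A small sqlite3 sketch of that fan-out effect, with invented toy tables:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript("""
    CREATE TABLE chip (exp_id INTEGER, quality INTEGER);   -- 2 chip components
    CREATE TABLE warp (exp_id INTEGER, quality INTEGER);   -- 3 warp components
    INSERT INTO chip VALUES (1, 0), (1, 5);
    INSERT INTO warp VALUES (1, 0), (1, 0), (1, 7);
""")

# joining both one-to-many tables first multiplies rows (2 x 3 = 6),
# so the "bad chip" count comes out as 3 instead of 1
flat = conn.execute("""
    SELECT SUM(chip.quality != 0), SUM(warp.quality != 0)
    FROM chip JOIN warp USING (exp_id) GROUP BY exp_id
""").fetchone()

# aggregating the chip stage first (as the V1/V2/V3 nesting does) keeps the counts honest
nested = conn.execute("""
    SELECT c.bad_chips, SUM(warp.quality != 0)
    FROM (SELECT exp_id, SUM(quality != 0) AS bad_chips
          FROM chip GROUP BY exp_id) AS c
    JOIN warp USING (exp_id)
    GROUP BY exp_id, c.bad_chips
""").fetchone()

print(flat)     # (3, 2)
print(nested)   # (1, 1)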
branches/eam_branches/ipp-20110505/ippTools/src/disttool.c
r30906 r31587
447 447
448 448 if (pretend) {
449  if (!ippdbPrintMetadatas(stdout, output, "newdistRuns", true)) {
449  if (!ippdbPrintMetadatas(stdout, output, "newdistRuns", !simple)) {
450 450 psError(PS_ERR_UNKNOWN, false, "failed to print array");
451 451 psFree(output);
-
branches/eam_branches/ipp-20110505/ippTools/src/laptool.c
r31435 r31587 384 384 PXOPT_COPY_STR(config->args, where, "-projection_cell", "projection_cell", "=="); 385 385 PXOPT_COPY_STR(config->args, where, "-filter", "filter", "=="); 386 PXOPT_COPY_STR(config->args, where, "-label", "label", "==");386 // PXOPT_COPY_STR(config->args, where, "-label", "label", "=="); 387 387 PXOPT_COPY_STR(config->args, where, "-state", "state", "=="); 388 388 PXOPT_COPY_STR(config->args, where, "-fault", "fault", "=="); 389 389 390 pxAddLabelSearchArgs(config, where, "-label", "lapRun.label", "=="); 391 390 392 psString query = pxDataGet("laptool_pendingrun.sql"); 391 393 if (!query) { … … 560 562 psMetadata *where = psMetadataAlloc(); 561 563 PXOPT_COPY_S64(config->args, where, "-lap_id", "lap_id", "=="); 562 PXOPT_COPY_S64(config->args, where, "-exp_id", " exp_id", "==");564 PXOPT_COPY_S64(config->args, where, "-exp_id", "lapExp.exp_id", "=="); 563 565 564 566 psString query = pxDataGet("laptool_exposures.sql"); -
branches/eam_branches/ipp-20110505/ippTools/src/laptoolConfig.c
r31435 r31587 69 69 ADD_OPT(Str, pendingrunArgs, "-projection_cell", "search by projection cell", NULL); 70 70 ADD_OPT(Str, pendingrunArgs, "-filter", "search by filter", NULL); 71 ADD_OPT(Str, pendingrunArgs, "-label", "search by LAP run label", NULL); 71 // ADD_OPT(Str, pendingrunArgs, "-label", "search by LAP run label", NULL); 72 psMetadataAddStr(pendingrunArgs, PS_LIST_TAIL, "-label", PS_META_DUPLICATE_OK, "search by LAP run label", NULL); 72 73 ADD_OPT(Str, pendingrunArgs, "-state", "search by LAP run state", NULL); 73 74 ADD_OPT(Str, pendingrunArgs, "-fault", "search by LAP run fault", NULL); 74 75 ADD_OPT(Bool,pendingrunArgs, "-simple", "use the simple output format", false); 76 77 // -listrun 78 psMetadata *listrunArgs = psMetadataAlloc(); 79 ADD_OPT(S64, listrunArgs, "-seq_id", "search by LAP sequence ID", 0); 80 ADD_OPT(S64, listrunArgs, "-lap_id", "search by LAP run ID", 0); 81 ADD_OPT(Str, listrunArgs, "-projection_cell", "search by projection cell", NULL); 82 ADD_OPT(Str, listrunArgs, "-filter", "search by filter", NULL); 83 // ADD_OPT(Str, listrunArgs, "-label", "search by LAP run label", NULL); 84 psMetadataAddStr(listrunArgs, PS_LIST_TAIL, "-label", PS_META_DUPLICATE_OK, "search by LAP run label", NULL); 85 ADD_OPT(Str, listrunArgs, "-state", "search by LAP run state", NULL); 86 ADD_OPT(Str, listrunArgs, "-fault", "search by LAP run fault", NULL); 87 ADD_OPT(Bool,listrunArgs, "-simple", "use the simple output format", false); 75 88 76 89 // -updaterun … … 128 141 psMetadata *argSets = psMetadataAlloc(); 129 142 psMetadata *modes = psMetadataAlloc(); 130 143 131 144 PXOPT_ADD_MODE("-definesequence", "", LAPTOOL_MODE_DEFINESEQUENCE, definesequenceArgs); 132 145 PXOPT_ADD_MODE("-listsequence", "", LAPTOOL_MODE_LISTSEQUENCE, listsequenceArgs); 133 146 PXOPT_ADD_MODE("-definerun", "", LAPTOOL_MODE_DEFINERUN, definerunArgs); 134 147 PXOPT_ADD_MODE("-pendingrun", "", LAPTOOL_MODE_PENDINGRUN, pendingrunArgs); 148 PXOPT_ADD_MODE("-listrun", "", LAPTOOL_MODE_PENDINGRUN, listrunArgs); 135 149 PXOPT_ADD_MODE("-updaterun", "", LAPTOOL_MODE_UPDATERUN, updaterunArgs); 136 150 PXOPT_ADD_MODE("-pendingexp", "", LAPTOOL_MODE_PENDINGEXP, pendingexpArgs); -
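With the laptool changes, -label for -pendingrun (and the new -listrun alias, which shares the pendingrun handler) is registered with PS_META_DUPLICATE_OK and resolved through pxAddLabelSearchArgs, so a single query can be restricted to several LAP labels. A sketch of what a driver invocation might look like; the flag names come from the diff, but the label and state values and the Python wrapper itself are only illustrative:

import subprocess

# hypothetical labels; -label may now be given more than once
cmd = [
    "laptool", "-listrun",
    "-label", "LAP.ThreePi.20110504",
    "-label", "LAP.MD04.test",
    "-state", "new",
]
result = subprocess.run(cmd, capture_output=True, text=True)
print(result.stdout)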
branches/eam_branches/ipp-20110505/ippTools/src/pstamptool.c
r30543 r31587 843 843 psFree(where); 844 844 845 psStringAppend(&query, " ORDER BY priority DESC, req_id ");845 psStringAppend(&query, " ORDER BY priority DESC, req_id, job_id"); 846 846 847 847 // treat limit == 0 as "no limit" … … 1323 1323 psFree(where); 1324 1324 1325 psStringAppend(&query, " ORDER BY priority DESC, req_id ");1325 psStringAppend(&query, " ORDER BY priority DESC, req_id, dep_id"); 1326 1326 1327 1327 // treat limit == 0 as "no limit" -
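The pstamptool queries for pending jobs and pending dependencies now add job_id / dep_id as a final sort key, so rows that tie on priority and req_id come back in a stable, repeatable order. The equivalent multi-key sort in Python, with made-up rows:

# highest priority first, then req_id, then job_id as the deterministic tie-breaker
jobs = [
    {"priority": 5, "req_id": 10, "job_id": 3},
    {"priority": 5, "req_id": 10, "job_id": 1},
]
jobs.sort(key=lambda j: (-j["priority"], j["req_id"], j["job_id"]))
print(jobs)   # job_id 1 now always precedes job_id 3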
branches/eam_branches/ipp-20110505/ippTools/src/pubtoolConfig.c
r30769 r31587
48 48 psMetadataAddStr(defineclientArgs, PS_LIST_TAIL, "-product", 0, "define product (required)", NULL);
49 49 psMetadataAddStr(defineclientArgs, PS_LIST_TAIL, "-workdir", 0, "define workdir (required)", NULL);
50  psMetadataAddS16(defineclientArgs, PS_LIST_TAIL, "-output_format", 0, "define output format", 2);
50 51 psMetadataAddStr(defineclientArgs, PS_LIST_TAIL, "-comment", 0, "define comment", NULL);
52  psMetadataAddStr(defineclientArgs, PS_LIST_TAIL, "-name", 0, "define name", NULL);
51 53 psMetadataAddBool(defineclientArgs, PS_LIST_TAIL, "-unmagicked", 0, "allow unmagicked data?", false);
-
branches/eam_branches/ipp-20110505/ippTools/src/stacktool.c
r31445 r31587 246 246 PXOPT_COPY_STR(config->args, where, "-select_data_group", "warpRun.data_group", "=="); 247 247 pxAddLabelSearchArgs (config, where, "-select_label", "warpRun.label", "LIKE"); // define using warp label 248 248 pxAddLabelSearchArgs (config, where, "-warp_id", "warpRun.warp_id", "=="); 249 249 250 // these are used to build the HAVING restriction 250 251 PXOPT_COPY_S32(config->args, having, "-min_num", "num_warp", ">="); … … 508 509 association->sass_id = sass_id; 509 510 } 511 else { 512 sass_id = association->sass_id; 513 } 510 514 // Insert the map entry for this row. 511 515 stackAssociationMapRow *maprow = stackAssociationMapRowAlloc(sass_id,stack_id); -
branches/eam_branches/ipp-20110505/ippTools/src/stacktoolConfig.c
r30945 r31587 91 91 psMetadataAddF64(definebyqueryArgs, PS_LIST_TAIL, "-select_zpt_obs_max", 0, "define max zero point", NAN); 92 92 psMetadataAddF64(definebyqueryArgs, PS_LIST_TAIL, "-select_astrom", 0, "define max astrometry rms", NAN); 93 psMetadataAddS64(definebyqueryArgs, PS_LIST_TAIL, "-warp_id", PS_META_DUPLICATE_OK, "include this warp ID (multiple OK)", 0); 93 94 psMetadataAddS32(definebyqueryArgs, PS_LIST_TAIL, "-random", 0, "use this number of random elements", 0); 94 95 psMetadataAddS32(definebyqueryArgs, PS_LIST_TAIL, "-min_num", 0, "minimum number of inputs", 0); -
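Two things happen in the stacktool hunks: -definebyquery gains a repeatable -warp_id selector, and the association bookkeeping gets a fix — when a stackAssociation row already exists, the map entry is now built from the existing sass_id rather than from a value that was never set on that path. The same get-or-create pattern, sketched in Python with invented names:

def get_or_create_association(associations, key, allocate_id):
    """Return the sass_id for `key`, creating the association only when it is missing."""
    assoc = associations.get(key)
    if assoc is None:
        sass_id = allocate_id()              # new association: allocate a fresh ID
        associations[key] = {"sass_id": sass_id}
    else:
        sass_id = assoc["sass_id"]           # existing association: reuse its ID
    return sass_id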
branches/eam_branches/ipp-20110505/ppStack/src/ppStackPrepare.c
r31158 r31587 144 144 int numCols = 0, numRows = 0; // Size of image 145 145 options->sumExposure = 0.0; 146 int numWithSources = 0; 146 147 for (int i = 0; i < num; i++) { 147 148 pmFPAfile *file = pmFPAfileSelectSingle(config->files, "PPSTACK.INPUT", i); // File of interest … … 188 189 pmReadout *ro = pmFPAviewThisReadout(view, file->fpa); // Readout with sources 189 190 detections = psMetadataLookupPtr(NULL, ro->analysis, "PSPHOT.DETECTIONS"); // Sources 190 if (!detections || !detections->allSources ) {191 if (!detections || !detections->allSources || !detections->allSources->n) { 191 192 psWarning("No detections found for image %d --- rejecting.", i); 192 193 options->inputMask->data.PS_TYPE_VECTOR_MASK_DATA[i] = PPSTACK_MASK_CAL; … … 196 197 197 198 options->sourceLists->data[i] = psMemIncrRefCounter(detections->allSources); 199 numWithSources++; 198 200 } 199 201 … … 230 232 ppStackFileActivation(config, PPSTACK_FILES_PREPARE, true); 231 233 } 234 } 235 if (numWithSources < 2) { 236 // This can happen if the inputs have been destreaked 237 psErrorStackPrint(stderr, "Not enough inputs have sources"); 238 psWarning("No inputs have sources --- suspect bad data quality."); 239 if (options->quality == 0) { 240 options->quality = PPSTACK_ERR_DATA; 241 } 242 psErrorClear(); 243 psFree(view); 244 return false; 232 245 } 233 246 … … 308 321 psFree(psfs); 309 322 if (!options->psf) { 323 #if 1 310 324 psError(psErrorCodeLast(), false, "Unable to determine output PSF."); 325 #else 326 // This will repair the problem reproted in ticket 1427 but we aren't yet sure 327 // why ppStackPSF is failing so we are going to continue to fault for now 328 int errorCode = psErrorCodeLast(); 329 if (errorCode == PPSTACK_ERR_PSF) { 330 psErrorStackPrint(stderr, "Unable to determine output PSF."); 331 psWarning("Unable to determine output PSF --- suspect bad data quality."); 332 if (options->quality == 0) { 333 options->quality = errorCode; 334 } 335 psErrorClear(); 336 } else { 337 psError(psErrorCodeLast(), false, "Unable to determine output PSF."); 338 } 339 #endif // notyet 311 340 psFree(view); 312 341 return false; -
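ppStackPrepare now counts how many inputs actually carry a source list (inputs whose detection list is empty, for example heavily destreaked skycells, are rejected up front), and if fewer than two usable inputs remain the run is tagged with a data-quality fault and abandoned cleanly instead of failing later. A condensed sketch of that gate; the container and option names and the numeric code are placeholders:

PPSTACK_ERR_DATA = -1   # placeholder; the real code uses the library's error code

def prepare_inputs(source_lists, options):
    """Count usable inputs and degrade to a quality flag instead of faulting."""
    num_with_sources = sum(1 for sources in source_lists if sources)
    if num_with_sources < 2:
        # this can happen if the inputs have been destreaked
        print("warning: not enough inputs have sources -- suspect bad data quality")
        if options.get("quality", 0) == 0:
            options["quality"] = PPSTACK_ERR_DATA
        return False
    return True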
branches/eam_branches/ipp-20110505/psModules/src/imcombine/pmSubtractionStamps.c
r31451 r31587 834 834 } 835 835 836 psStatsInit (stats); 837 if (!psVectorStats (stats, flux1, NULL, NULL, 0)) { 838 psAbort ("failed to generate stats"); 839 } 840 float f1 = stats->sampleMedian; 841 842 psStatsInit (stats); 843 if (!psVectorStats (stats, flux2, NULL, NULL, 0)) { 844 psAbort ("failed to generate stats"); 845 } 846 float f2 = stats->sampleMedian; 836 float f1 = NAN; 837 if (flux1->n > 0) { 838 psStatsInit (stats); 839 if (!psVectorStats (stats, flux1, NULL, NULL, 0)) { 840 psAbort ("failed to generate stats"); 841 } 842 f1 = stats->sampleMedian; 843 } 844 845 float f2 = NAN; 846 if (flux2->n > 0) { 847 psStatsInit (stats); 848 if (!psVectorStats (stats, flux2, NULL, NULL, 0)) { 849 psAbort ("failed to generate stats"); 850 } 851 f2 = stats->sampleMedian; 852 } 847 853 848 854 stamps->window1->kernel[y][x] = f1; … … 893 899 float R2 = Sr2 / Sf2; 894 900 901 if (!isfinite(R1) || !isfinite(R2)) { 902 psError(PM_ERR_STAMPS, true, "Kron Radii are not finite (failure to converge)."); 903 psFree (stats); 904 psFree (flux1); 905 psFree (flux2); 906 psFree (norm1); 907 psFree (norm2); 908 return false; 909 } 910 895 911 // Compare the Kron Radii (R1 & R2) to above to the FWHMs : if they are too discrepant, we will need to rescale 896 912 psLogMsg ("psModules.imcombine", PS_LOG_DETAIL, "Kron Radii vs FWHMs 1: fwhm: %f, kron %f\n", fwhm1, R1); … … 901 917 stamps->normWindow2 = 2.75*R2; 902 918 psLogMsg ("psModules.imcombine", PS_LOG_DETAIL, "Windows from Kron Radii: %f for 1, %f for 2\n", stamps->normWindow1, stamps->normWindow2); 919 903 920 904 921 // if the calculated normWindows are too large, we will fall off the stamps. In this case, we need to try again. … … 913 930 psFree (norm2); 914 931 return false; 915 }916 917 if (!isfinite(R1) || !isfinite(R2)) {918 psError(PM_ERR_STAMPS, true, "Kron Radii are not finite (failure to converge).");919 psFree (stats);920 psFree (flux1);921 psFree (flux2);922 psFree (norm1);923 psFree (norm2);924 return false;925 932 } 926 933 -
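In pmSubtractionStamps.c the kernel medians are now computed only when the corresponding flux vector is non-empty (otherwise the entry is left as NaN), and the finiteness test on the Kron radii is performed before the radii are used to size the normalisation windows. The same two guards, sketched in Python with statistics.median standing in for psVectorStats:

import math
from statistics import median

def safe_median(values):
    """Median of `values`, or NaN when there is nothing to measure."""
    return median(values) if values else math.nan

def norm_windows(r1, r2, scale=2.75):
    """Size the normalisation windows, but only from finite Kron radii."""
    if not (math.isfinite(r1) and math.isfinite(r2)):
        raise ValueError("Kron radii are not finite (failure to converge)")
    return scale * r1, scale * r2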
branches/eam_branches/ipp-20110505/psModules/src/objects/models
- Property svn:mergeinfo deleted
-
branches/eam_branches/ipp-20110505/psconfig/psbuild
r31068 r31587
386 386 if (@ARGV != 2) { die "USAGE: psbuild -bootstrap (install_dir)\n"; }
387 387 $psconfdir = $ARGV[1];
388  die "Target directory must be absolute, not relative: $psconfdir\n" unless $psconfdir =~ m|^/|;
388 389
389 390 # copy psconfig.csh and psconfig.bash to psconfdir
-
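psbuild -bootstrap now refuses a relative install directory (the new Perl check is "unless $psconfdir =~ m|^/|"). The same guard in Python, for a hypothetical wrapper script:

import os
import sys

def check_install_dir(psconfdir):
    """Mirror the psbuild -bootstrap check: the target directory must be absolute."""
    if not os.path.isabs(psconfdir):
        sys.exit("Target directory must be absolute, not relative: %s" % psconfdir)
    return psconfdir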
branches/eam_branches/ipp-20110505/pstamp/scripts/pstamp_job_run.pl
r30850 r31587 146 146 # user required uncensored but since stage isn't chip we can't rebuild them 147 147 my_die("uncensored inputs not available for job $job_id", $job_id, $PSTAMP_NOT_AVAILABLE, 'stop'); 148 } elsif (($options & $PSTAMP_REQUEST_UNCENSORED) and ($params->{state} eq 'update') and ($stage ne 'chip')) {148 } elsif (($options & $PSTAMP_REQUEST_UNCENSORED) and ($params->{state} ne 'full') and ($stage ne 'chip')) { 149 149 # we can only restore pixels for chip stage images if the data has been updated. 150 # XXX: this test is not quite good enough. If all components have been updated then the151 # state will be 'full' But this will get us going.152 print "Run state is update: will make stamps from destreaked $stage images.\n";150 # the data will have been updated if the params->{state} the state when the job was queued is not 'full' ( 151 # XXX: we should probably be looking explicitly at the job and checking for a dep_id 152 print "Run state was $params->{state}: will make stamps from destreaked $stage images.\n"; 153 153 # make stamps from uncensored images 154 154 $muggle = 0; -
branches/eam_branches/ipp-20110505/pstamp/scripts/pstampparse.pl
r30793 r31587 754 754 if ( $num_jobs == 0 ) { 755 755 print STDERR "no jobs for row $rownum\n" if $verbose; 756 insertFakeJobForRow($row, 1, $PSTAMP_NO_ JOBS_QUEUED);756 insertFakeJobForRow($row, 1, $PSTAMP_NO_IMAGE_MATCH); 757 757 $num_jobs = 1; 758 $row->{job_num} = 1; 758 759 } 759 760 return $num_jobs;