Changeset 14455
- Timestamp:
- Aug 9, 2007, 10:25:52 AM (19 years ago)
- Location:
- trunk/psModules/src/imcombine
- Files:
-
- 6 edited
-
pmSubtraction.c (modified) (33 diffs)
-
pmSubtraction.h (modified) (2 diffs)
-
pmSubtractionKernels.c (modified) (22 diffs)
-
pmSubtractionKernels.h (modified) (2 diffs)
-
pmSubtractionStamps.c (modified) (3 diffs)
-
pmSubtractionStamps.h (modified) (1 diff)
Legend:
- Unmodified
- Added
- Removed
-
trunk/psModules/src/imcombine/pmSubtraction.c
r14420 r14455 4 4 * @author GLG, MHPCC 5 5 * 6 * @version $Revision: 1.3 3$ $Name: not supported by cvs2svn $7 * @date $Date: 2007-08-0 7 19:02:55$6 * @version $Revision: 1.34 $ $Name: not supported by cvs2svn $ 7 * @date $Date: 2007-08-09 20:25:52 $ 8 8 * 9 9 * Copyright 2004-2007 Institute for Astronomy, University of Hawaii … … 30 30 // Private (file-static) functions 31 31 ////////////////////////////////////////////////////////////////////////////////////////////////////////////// 32 33 // Return the number of polynomial terms there are for a given order 34 static inline long polyTerms(int order // Order of polynomial 35 ) 36 { 37 return (order + 1) * (order + 2) / 2; 38 } 32 39 33 40 // Given a stamp coordinates (x,y), generate a matrix where the elements (i,j) are x^i * y^j … … 160 167 case PM_SUBTRACTION_KERNEL_ISIS: { \ 161 168 psKernel *preCalc = (KERNELS)->preCalc->data[i]; /* Precalculated values */ \ 162 psKernel *subKernel = (KERNELS)->preCalc->data[(KERNELS)->subIndex]; /* Kernel to subtract */ \163 169 for (int v = -size; v <= size; v++) { \ 164 170 for (int u = -size; u <= size; u++) { \ 165 171 (TARGET)->kernel[v][u] += value * FUNC(preCalc->kernel[v][u]); \ 166 /* Subtract (0,0) kernel from other kernels to preserve photometric scaling */ \ 167 if ((KERNELS)->spatialOrder > 0 && i != (KERNELS)->subIndex) { \ 168 (TARGET)->kernel[v][u] += subValue * FUNC(subKernel->kernel[v][u]); \ 169 } \ 172 /* Photometric scaling is already built in to the precalculated kernel */ \ 170 173 } \ 171 174 } \ … … 207 210 ) 208 211 { 209 int numKernels = solution->n - 1;// Number of kernel basis functions212 int numKernels = kernels->num; // Number of kernel basis functions 210 213 assert(kernels->u->n == numKernels); 211 214 assert(kernels->v->n == numKernels); 212 assert(kernels->xOrder->n == numKernels);213 assert( kernels->yOrder->n == numKernels);215 int spatialOrder = kernels->spatialOrder; // Maximum spatial polynomial order 216 assert(solution->n == 
numKernels * polyTerms(spatialOrder) + polyTerms(kernels->bgOrder)); 214 217 215 218 // Ensure the subIndex for POIS kernels is what is expected 216 219 assert((kernels->type != PM_SUBTRACTION_KERNEL_POIS && 217 220 kernels->type != PM_SUBTRACTION_KERNEL_SPAM && 218 kernels->type != PM_SUBTRACTION_KERNEL_FRIES ) ||219 (kernels->u->data.S32[kernels->subIndex] == 0 && kernels->v->data.S32[kernels->subIndex] == 0&&220 kernels-> xOrder->data.S32[kernels->subIndex] == 0 &&221 kernels->yOrder->data.S32[kernels->subIndex] == 0));221 kernels->type != PM_SUBTRACTION_KERNEL_FRIES && 222 kernels->type != PM_SUBTRACTION_KERNEL_GUNK && 223 kernels->type != PM_SUBTRACTION_KERNEL_RINGS) || 224 (kernels->u->data.S32[kernels->subIndex] == 0 && kernels->v->data.S32[kernels->subIndex] == 0)); 222 225 223 226 int size = kernels->size; // Kernel half-size … … 228 231 229 232 for (int i = 0; i < numKernels; i++) { 230 int xOrder = kernels->xOrder->data.S32[i]; // Polynomial order in x 231 int yOrder = kernels->yOrder->data.S32[i]; // Polynomial order in y 232 float value = weightFunc(polyValues->data.F64[yOrder][xOrder] * 233 solution->data.F64[i]); // Value to sum 234 float subValue = weightFunc(-solution->data.F64[i]); // Value to subtract (actually add, because of -) 233 double value = 0.0; // Value of kernel coefficient 234 for (int yOrder = 0, index = i; yOrder <= spatialOrder; yOrder++) { 235 for (int xOrder = 0; xOrder <= spatialOrder - yOrder; xOrder++, index += numKernels) { 236 value += polyValues->data.F64[yOrder][xOrder] * solution->data.F64[index]; 237 } 238 } 239 235 240 switch (kernels->type) { 236 241 case PM_SUBTRACTION_KERNEL_POIS: { 237 242 int u = kernels->u->data.S32[i]; // Offset in x 238 243 int v = kernels->v->data.S32[i]; // Offset in y 239 kernel->kernel[v][u] += value;244 kernel->kernel[v][u] += weightFunc(value); 240 245 if (kernels->spatialOrder > 0 && i != kernels->subIndex) { 241 246 // The (0,0) element is subtracted from most kernels to preserve 
photometric scaling 242 kernel->kernel[0][0] += subValue;247 kernel->kernel[0][0] += weightFunc(- value); 243 248 } 244 249 break; … … 253 258 254 259 // Normalising sum of kernel component to unity 255 value /=weightFunc((uStop - uStart) * (vStop - vStart));260 float norm = 1.0 / weightFunc((uStop - uStart) * (vStop - vStart)); 256 261 257 262 for (int v = vStart; v <= vStop; v++) { 258 263 for (int u = uStart; u <= uStop; u++) { 259 kernel->kernel[v][u] += value; 264 kernel->kernel[v][u] += norm * weightFunc(value); 265 if (kernels->spatialOrder > 0 && i != kernels->subIndex) { 266 // The (0,0) element is subtracted from most kernels to preserve photometric scaling 267 kernel->kernel[0][0] += weightFunc(- value); 268 } 260 269 } 261 }262 if (kernels->spatialOrder > 0 && i != kernels->subIndex) {263 /* The (0,0) element is subtracted from most kernels to preserve photometric scaling */264 kernel->kernel[0][0] += subValue;265 270 } 266 271 break; … … 270 275 // Using pre-calculated function 271 276 psKernel *preCalc = kernels->preCalc->data[i]; // Precalculated values 277 // Iterating over the kernel 272 278 for (int v = -size; v <= size; v++) { 273 279 for (int u = -size; u <= size; u++) { 274 kernel->kernel[v][u] += value * weightFunc(preCalc->kernel[v][u]); 280 kernel->kernel[v][u] += weightFunc(preCalc->kernel[v][u] * value); 281 // Photometric scaling is built into the preCalc kernel --- no subtraction! 275 282 } 276 283 } 277 284 } else { 278 285 // Using delta function 286 bool subtract = (kernels->spatialOrder > 0 && i != kernels->subIndex); // Subtract (0,0)? 
279 287 int u = kernels->u->data.S32[i]; // Offset in x 280 288 int v = kernels->v->data.S32[i]; // Offset in y 281 kernel->kernel[v][u] += value; 282 } 283 // The (0,0) kernel is subtracted from other kernels to preserve photometric scaling 284 if (kernels->spatialOrder > 0 && i != kernels->subIndex) { 285 kernel->kernel[0][0] += subValue; 286 } 287 break; 288 } 289 case PM_SUBTRACTION_KERNEL_ISIS: { 290 psKernel *preCalc = kernels->preCalc->data[i];// Precalculated values 291 psKernel *subKernel = kernels->preCalc->data[kernels->subIndex]; // Kernel to subtract 292 for (int v = -size; v <= size; v++) { 293 for (int u = -size; u <= size; u++) { 294 kernel->kernel[v][u] += value * weightFunc(preCalc->kernel[v][u]); 295 // The (0,0) kernel is subtracted from other kernels to preserve photometric scaling 296 if (kernels->spatialOrder > 0 && i != kernels->subIndex) { 297 kernel->kernel[v][u] += subValue * weightFunc(subKernel->kernel[v][u]); 298 } 289 kernel->kernel[v][u] += weightFunc(value); 290 if (subtract) { 291 // The (0,0) element is subtracted from most kernels to preserve photometric scaling 292 kernel->kernel[0][0] += weightFunc(- value); 299 293 } 300 294 } 301 295 break; 302 296 } 297 case PM_SUBTRACTION_KERNEL_ISIS: { 298 psKernel *preCalc = kernels->preCalc->data[i]; // Precalculated values 299 // Iterating over the kernel 300 for (int v = -size; v <= size; v++) { 301 for (int u = -size; u <= size; u++) { 302 kernel->kernel[v][u] += weightFunc(preCalc->kernel[v][u] * value); 303 // Photometric scaling is built into the preCalc kernel --- no subtraction! 
304 } 305 } 306 break; 307 } 303 308 case PM_SUBTRACTION_KERNEL_RINGS: { 304 309 if (i == kernels->subIndex) { 305 kernel->kernel[0][0] += value;310 kernel->kernel[0][0] += weightFunc(value); 306 311 break; 307 312 } … … 311 316 psVector *poly = preCalc->data[2]; // Polynomial values 312 317 int num = uCoords->n; // Number of pixels 313 value /= weightFunc(num); // Normalising sum of kernel component to unity 318 314 319 for (int j = 0; j < num; j++) { 315 320 int u = uCoords->data.S32[j], v = vCoords->data.S32[j]; // Kernel coordinates 316 kernel->kernel[v][u] += value * weightFunc(poly->data.F32[j]);321 kernel->kernel[v][u] += weightFunc(value * poly->data.F32[j]); 317 322 } 318 319 // The (0,0) kernel is subtracted from other kernels to preserve photometric scaling 320 if (kernels->spatialOrder > 0) { 321 kernel->kernel[0][0] += subValue; 322 } 323 kernel->kernel[0][0] += weightFunc(- value * num); 323 324 break; 324 325 } … … 336 337 int index, // Kernel basis function index 337 338 int x, int y, // Pixel around which to convolve 338 const psImage *image, // Image to convolve 339 const psImage *polyValues // Spatial polynomial values 339 const psImage *image // Image to convolve 340 340 ) 341 341 { 342 int xOrder = kernels->xOrder->data.S32[index]; // Polynomial order in x343 int yOrder = kernels->yOrder->data.S32[index]; // Polynomial order in y344 double polyValue = polyValues->data.F64[yOrder][xOrder]; // Value of spatial polynomial345 346 342 switch (kernels->type) { 347 343 case PM_SUBTRACTION_KERNEL_POIS: { … … 349 345 int u = kernels->u->data.S32[index]; // Offset in x 350 346 int v = kernels->v->data.S32[index]; // Offset in y 351 double value = polyValue *image->data.F32[y + v][x + u]; // Value of convolution347 float value = image->data.F32[y + v][x + u]; // Value of convolution 352 348 if (kernels->spatialOrder > 0 && index != kernels->subIndex) { 353 349 // The (0,0) element is subtracted from most kernels to preserve photometric scaling … … 366 362 
for (int v = vStart; v <= vStop; v++) { 367 363 for (int u = uStart; u <= uStop; u++) { 368 sum += polyValue *image->data.F32[y + v][x + u];364 sum += image->data.F32[y + v][x + u]; 369 365 } 370 366 } … … 377 373 } 378 374 case PM_SUBTRACTION_KERNEL_GUNK: { 379 double value; // The value to return380 375 if (index < kernels->inner) { 381 376 // Using pre-calculated function … … 388 383 } 389 384 } 390 value = polyValue * sum; 391 } else { 392 // Using delta function 393 int u = kernels->u->data.S32[index]; // Offset in x 394 int v = kernels->v->data.S32[index]; // Offset in y 395 value = polyValue * image->data.F32[y + v][x + u]; // Value of convolution 396 } 385 return sum; 386 } 387 // Using delta function 388 int u = kernels->u->data.S32[index]; // Offset in x 389 int v = kernels->v->data.S32[index]; // Offset in y 390 float value = image->data.F32[y + v][x + u]; // Value of convolution 397 391 // The (0,0) delta function is subtracted from most kernels to preserve photometric scaling 398 392 if (kernels->spatialOrder > 0 && index != kernels->subIndex) { … … 403 397 case PM_SUBTRACTION_KERNEL_ISIS: { 404 398 psKernel *kernel = kernels->preCalc->data[index]; // The convolution kernel 405 psKernel *subKernel = kernels->preCalc->data[kernels->subIndex]; // Kernel to subtract406 399 int size = kernels->size; // Kernel half-size 407 400 double sum = 0.0; // Accumulated sum from convolution 408 double sub = 0.0; // Accumulated sum to subtract409 401 for (int v = -size; v <= size; v++) { 410 402 for (int u = -size; u <= size; u++) { 411 403 sum += kernel->kernel[v][u] * image->data.F32[y + v][x + u]; 412 // The (0,0) kernel is subtracted from other kernels to preserve photometric scaling 413 if (kernels->spatialOrder > 0 && index != kernels->subIndex) { 414 sub += subKernel->kernel[v][u] * image->data.F32[y + v][x + u]; 415 } 404 // Photometric scaling is already built in to the precalculated kernel 416 405 } 417 406 } 418 return polyValue * sum - sub;407 return sum; 
419 408 } 420 409 case PM_SUBTRACTION_KERNEL_RINGS: { 421 410 if (index == kernels->subIndex) { 422 return image->data.F32[ 0][0];411 return image->data.F32[y][x]; 423 412 } 424 413 psArray *preCalc = kernels->preCalc->data[index]; // Precalculated data … … 432 421 sum += image->data.F32[y + v][x + u] * poly->data.F32[j]; 433 422 } 434 sum /= (double)num; // Normalising sum of kernel component to unity435 423 // The (0,0) kernel is subtracted from other kernels to preserve photometric scaling 436 return polyValue * sum - image->data.F32[0][0];424 return sum - num * image->data.F32[y][x]; 437 425 } 438 426 default: … … 592 580 PS_ASSERT_PTR_NON_NULL(kernels, false); 593 581 PS_ASSERT_VECTORS_SIZE_EQUAL(kernels->u, kernels->v, false); 594 PS_ASSERT_VECTORS_SIZE_EQUAL(kernels->u, kernels->xOrder, false);595 PS_ASSERT_VECTORS_SIZE_EQUAL(kernels->u, kernels->yOrder, false);596 582 if (weight) { 597 583 PS_ASSERT_IMAGE_NON_NULL(weight, false); … … 599 585 PS_ASSERT_IMAGE_TYPE(weight, PS_TYPE_F32, false); 600 586 } 601 if (kernels->type == PM_SUBTRACTION_KERNEL_ISIS) {602 PS_ASSERT_VECTORS_SIZE_EQUAL(kernels->u, kernels->preCalc, false);603 }604 587 PS_ASSERT_INT_NONNEGATIVE(footprint, false); 605 588 … … 609 592 610 593 int spatialOrder = kernels->spatialOrder; // Maximum order of spatial variation 611 int numKernels = kernels->u->n; // Number of kernel basis functions 612 int numParams = numKernels + 1; // Total number of parameters to solve for: coefficient of each kernel 613 // basis function, and a constant background offset. 
614 int bgIndex = numKernels; // Index in matrix for the background 615 psVector *convolutions = psVectorAlloc(numKernels, PS_TYPE_F64); // Convolutions for each kernel 594 int numKernels = kernels->num; // Number of kernel basis functions 595 int numSpatial = polyTerms(spatialOrder); // Number of spatial variations 596 int numBackground = polyTerms(kernels->bgOrder); // Number of background terms 597 598 // Total number of parameters to solve for: coefficient of each kernel basis function, multipled by the 599 // number of coefficients for the spatial polynomial, and a constant background offset. 600 int numParams = numKernels * numSpatial + numBackground; 601 int bgIndex = numParams - numBackground; // Index in matrix for the background 602 psVector *convolutions = psVectorAlloc(numKernels * numSpatial, PS_TYPE_F64); // Convolutions 616 603 617 604 // We iterate over each stamp, allocate the matrix and vectors if … … 637 624 psVectorInit(stampVector, 0.0); 638 625 639 // Pre-evaluated spatial polynomial values 640 psImage *polyValues = spatialPolyValues(spatialOrder, 641 2.0 * (float)(stamp->x - numCols/2.0) / (float)numCols, 642 2.0 * (float)(stamp->y - numRows/2.0) / (float)numRows); 626 float xNorm = 2.0 * (float)(stamp->x - numCols/2.0) / (float)numCols; // Normalised x coord 627 float yNorm = 2.0 * (float)(stamp->y - numRows/2.0) / (float)numRows; // Normalised y coord 628 psImage *polyValues = spatialPolyValues(spatialOrder, xNorm, yNorm); // Spatial polynomial terms 643 629 644 630 for (int y = stamp->y - footprint; y <= stamp->y + footprint; y++) { … … 648 634 649 635 // Generate the convolutions 650 for (int i = 0; i < numKernels; i++) { 651 convolutions->data.F64[i] = convolvePixel(kernels, i, x, y, reference, polyValues); 636 for (int j = 0; j < numKernels; j++) { 637 double value = convolvePixel(kernels, j, x, y, reference); // Value from convolution 638 // Generate the pseudo-convolutions from the spatial polynomial terms 639 for (int yOrder = 0, 
index = j; yOrder <= spatialOrder; yOrder++) { 640 for (int xOrder = 0; xOrder <= spatialOrder - yOrder; 641 xOrder++, index += numKernels) { 642 convolutions->data.F64[index] = value * polyValues->data.F64[yOrder][xOrder]; 643 } 644 } 652 645 } 653 646 654 647 // Generate the least-squares matrix and vector 655 648 // Upper diagonal only 656 for (int i = 0; i < numKernels; i++) {657 for (int j = i; j < numKernels; j++) {649 for (int i = 0; i < bgIndex; i++) { 650 for (int j = i; j < bgIndex; j++) { 658 651 stampMatrix->data.F64[i][j] += convolutions->data.F64[i] * 659 652 convolutions->data.F64[j] * invNoise2; … … 674 667 // Fill in lower diagonal of symmetric matrix, while checking for bad values 675 668 bool bad = false; // Are there bad values? 676 for (int i = 0; i < numKernels; i++) {669 for (int i = 0; i < bgIndex; i++) { 677 670 for (int j = 0; j < i; j++) { 678 671 stampMatrix->data.F64[i][j] = stampMatrix->data.F64[j][i]; … … 694 687 if (bad) { 695 688 stamp->status = PM_SUBTRACTION_STAMP_REJECTED; 696 psTrace("psModules.imcombine", 3, "Rejecting stamp %d because of bad equation\n", i); 689 psTrace("psModules.imcombine", 3, "Rejecting stamp %d (%d,%d) because of bad equation\n", 690 i, stamp->x, stamp->y); 697 691 } else { 698 692 stamp->status = PM_SUBTRACTION_STAMP_USED; … … 807 801 PS_ASSERT_VECTOR_TYPE(solution, PS_TYPE_F64, -1); 808 802 PS_ASSERT_PTR_NON_NULL(kernels, -1); 809 PS_ASSERT_VECTOR_SIZE(kernels->u, solution->n - 1, -1); 803 PS_ASSERT_VECTOR_SIZE(solution, kernels->num * polyTerms(kernels->spatialOrder) + 804 polyTerms(kernels->bgOrder), -1); 805 PS_ASSERT_VECTOR_SIZE(kernels->u, kernels->num, -1); 810 806 PS_ASSERT_VECTORS_SIZE_EQUAL(kernels->u, kernels->v, -1); 811 PS_ASSERT_VECTORS_SIZE_EQUAL(kernels->u, kernels->xOrder, -1);812 PS_ASSERT_VECTORS_SIZE_EQUAL(kernels->u, kernels->yOrder, -1);813 if (kernels->type == PM_SUBTRACTION_KERNEL_ISIS) {814 PS_ASSERT_ARRAY_NON_EMPTY(kernels->preCalc, -1);815 PS_ASSERT_ARRAY_SIZE(kernels->preCalc, 
solution->n - 1, -1);816 }817 807 818 808 // Measure deviations … … 836 826 int xStamp = stamp->x, yStamp = stamp->y; // Coordinates of stamp 837 827 838 // Precalulate polynomial values 839 psImage *polyValues = spatialPolyValues(kernels->spatialOrder, 840 2.0 * (float)(stamp->x - numCols/2.0) / (float)numCols, 841 2.0 * (float)(stamp->y - numRows/2.0) / (float)numRows); 828 float xNorm = 2.0 * (float)(stamp->x - numCols/2.0) / (float)numCols; // Normalised x coord 829 float yNorm = 2.0 * (float)(stamp->y - numRows/2.0) / (float)numRows; // Normalised y coord 830 psImage *polyValues = spatialPolyValues(kernels->spatialOrder, xNorm, yNorm); // Polynomial terms 842 831 843 832 #ifdef USE_FUNCTIONS_INSTEAD_OF_MACROS … … 878 867 } 879 868 deviations->data.F32[i] = sqrt(stats->sampleMean / 2.0); 880 psTrace("psModules.imcombine", 1, "Deviation for stamp %d: %f\n", i, deviations->data.F32[i]); 869 psTrace("psModules.imcombine", 1, "Deviation for stamp %d (%d,%d): %f\n", 870 i, stamp->x, stamp->y, deviations->data.F32[i]); 881 871 totalSquareDev += PS_SQR(deviations->data.F32[i]); 882 872 numStamps++; … … 896 886 if (stamp->status == PM_SUBTRACTION_STAMP_USED && deviations->data.F32[i] > limit) { 897 887 // Mask out the stamp in the image so you it's not found again 898 psTrace("psModules.imcombine", 3, "Rejecting stamp %d \n", i);888 psTrace("psModules.imcombine", 3, "Rejecting stamp %d (%d,%d)\n", i, stamp->x, stamp->y); 899 889 numRejected++; 900 890 for (int y = stamp->y - footprint; y <= stamp->y + footprint; y++) { … … 922 912 PS_ASSERT_VECTOR_NON_NULL(solution, NULL); 923 913 PS_ASSERT_PTR_NON_NULL(kernels, NULL); 914 PS_ASSERT_VECTOR_SIZE(solution, kernels->num * polyTerms(kernels->spatialOrder) + 915 polyTerms(kernels->bgOrder), NULL); 924 916 PS_ASSERT_FLOAT_WITHIN_RANGE(x, -1.0, 1.0, NULL); 925 917 PS_ASSERT_FLOAT_WITHIN_RANGE(y, -1.0, 1.0, NULL); 926 PS_ASSERT_VECTOR_SIZE(kernels->u, solution->n - 1, false);918 PS_ASSERT_VECTOR_SIZE(kernels->u, kernels->num, 
false); 927 919 PS_ASSERT_VECTORS_SIZE_EQUAL(kernels->u, kernels->v, NULL); 928 PS_ASSERT_VECTORS_SIZE_EQUAL(kernels->u, kernels->xOrder, NULL);929 PS_ASSERT_VECTORS_SIZE_EQUAL(kernels->u, kernels->yOrder, NULL);930 if (kernels->type == PM_SUBTRACTION_KERNEL_ISIS) {931 PS_ASSERT_ARRAY_NON_NULL(kernels->preCalc, NULL);932 PS_ASSERT_ARRAY_SIZE(kernels->preCalc, solution->n - 1, NULL);933 }934 920 935 921 // Precalulate polynomial values … … 958 944 PS_ASSERT_VECTOR_NON_NULL(solution, NULL); 959 945 PS_ASSERT_PTR_NON_NULL(kernels, NULL); 946 PS_ASSERT_VECTOR_SIZE(solution, kernels->num * polyTerms(kernels->spatialOrder) + 947 polyTerms(kernels->bgOrder), NULL); 960 948 PS_ASSERT_FLOAT_WITHIN_RANGE(x, -1.0, 1.0, NULL); 961 949 PS_ASSERT_FLOAT_WITHIN_RANGE(y, -1.0, 1.0, NULL); 962 PS_ASSERT_VECTOR_SIZE(kernels->u, solution->n - 1, false);950 PS_ASSERT_VECTOR_SIZE(kernels->u, kernels->num, false); 963 951 PS_ASSERT_VECTORS_SIZE_EQUAL(kernels->u, kernels->v, NULL); 964 PS_ASSERT_VECTORS_SIZE_EQUAL(kernels->u, kernels->xOrder, NULL);965 PS_ASSERT_VECTORS_SIZE_EQUAL(kernels->u, kernels->yOrder, NULL);966 if (kernels->type == PM_SUBTRACTION_KERNEL_ISIS) {967 PS_ASSERT_ARRAY_NON_NULL(kernels->preCalc, NULL);968 PS_ASSERT_ARRAY_SIZE(kernels->preCalc, solution->n - 1, NULL);969 }970 952 971 953 psArray *images = psArrayAlloc(solution->n - 1); // Images of each kernel to return … … 987 969 bool pmSubtractionConvolve(psImage **outImage, psImage **outWeight, psImage **outMask, 988 970 const psImage *inImage, const psImage *inWeight, const psImage *subMask, 989 psMaskType blank, const psVector *solution, const pmSubtractionKernels *kernels) 971 psMaskType blank, const psRegion *region, const psVector *solution, 972 const pmSubtractionKernels *kernels) 990 973 { 991 974 PS_ASSERT_IMAGE_NON_NULL(inImage, false); … … 1022 1005 PS_ASSERT_VECTOR_TYPE(solution, PS_TYPE_F64, false); 1023 1006 PS_ASSERT_PTR_NON_NULL(kernels, false); 1024 PS_ASSERT_VECTOR_SIZE(kernels->u, solution->n - 1, 
false); 1007 PS_ASSERT_VECTOR_SIZE(solution, kernels->num * polyTerms(kernels->spatialOrder) + 1008 polyTerms(kernels->bgOrder), -1); 1009 PS_ASSERT_VECTOR_SIZE(kernels->u, kernels->num, false); 1025 1010 PS_ASSERT_VECTORS_SIZE_EQUAL(kernels->u, kernels->v, false); 1026 PS_ASSERT_VECTORS_SIZE_EQUAL(kernels->u, kernels->xOrder, false); 1027 PS_ASSERT_VECTORS_SIZE_EQUAL(kernels->u, kernels->yOrder, false); 1028 if (kernels->type == PM_SUBTRACTION_KERNEL_ISIS) { 1029 PS_ASSERT_ARRAY_NON_NULL(kernels->preCalc, false); 1030 PS_ASSERT_ARRAY_SIZE(kernels->preCalc, solution->n - 1, false); 1031 } 1032 1033 int size = kernels->size; // Half-size of kernel 1011 if (region) { 1012 if (psRegionIsNaN(*region)) { 1013 psString string = psRegionToString(*region); 1014 psError(PS_ERR_BAD_PARAMETER_VALUE, true, "Input region (%s) contains NAN values", string); 1015 psFree(string); 1016 return false; 1017 } 1018 if (region->x0 < 0 || region->x1 > inImage->numCols || 1019 region->y0 < 0 || region->y1 > inImage->numRows) { 1020 psString string = psRegionToString(*region); 1021 psError(PS_ERR_BAD_PARAMETER_VALUE, true, "Input region (%s) does not fit in image (%dx%d)", 1022 string, inImage->numCols, inImage->numRows); 1023 psFree(string); 1024 return false; 1025 } 1026 } 1027 1034 1028 float background = solution->data.F64[solution->n-1]; // The difference in background 1035 1029 int numCols = inImage->numCols, numRows = inImage->numRows; // Image dimensions … … 1057 1051 } 1058 1052 1053 int size = kernels->size; // Half-size of kernel 1054 int fullSize = 2 * size + 1; // Full size of kernel 1059 1055 psImage *polyValues = NULL; // Pre-calculated polynomial values 1060 int fullSize = 2 * size + 1; // Full size of kernel1061 1056 1062 1057 psKernel *kernelImage = NULL; // Kernel for the image 1063 1058 psKernel *kernelWeight = NULL; // Kernel for the weight map 1064 1059 1065 for (int j = size; j < inImage->numRows - size; j += fullSize) { 1066 for (int i = size; i < inImage->numCols - 
size; i += fullSize) { 1060 // Get region for convolution: [xMin:xMax,yMin:yMax] 1061 int xMin = size, xMax = numCols - size; 1062 int yMin = size, yMax = numRows - size; 1063 if (region) { 1064 xMin = PS_MAX(region->x0, xMin); 1065 xMax = PS_MIN(region->x1, xMax); 1066 yMin = PS_MAX(region->y0, yMin); 1067 yMax = PS_MIN(region->y1, yMax); 1068 } 1069 1070 for (int j = yMin; j < yMax; j += fullSize) { 1071 for (int i = xMin; i < xMax; i += fullSize) { 1067 1072 1068 1073 // Only generate polynomial values every kernel footprint, since we have already assumed … … 1085 1090 #endif 1086 1091 1087 for (int y = j; y < PS_MIN(j + fullSize, numRows - size); y++) {1088 for (int x = i; x < PS_MIN(i + fullSize, numCols - size); x++) {1092 for (int y = j; y < PS_MIN(j + fullSize, yMax); y++) { 1093 for (int x = i; x < PS_MIN(i + fullSize, xMax); x++) { 1089 1094 // Check and propagate the kernel footprint, if required 1090 1095 if (subMask && (subMask->data.PS_TYPE_MASK_DATA[y][x] & … … 1123 1128 psFree(polyValues); 1124 1129 1125 // Mark the non-convolved part as blank 1126 for (int y = size; y < inImage->numRows - size; y++) { 1130 return true; 1131 } 1132 1133 bool pmSubtractionBorder(psImage *image, psImage *weight, psImage *mask, 1134 const pmSubtractionKernels *kernels, psMaskType blank) 1135 { 1136 PS_ASSERT_IMAGE_NON_NULL(image, false); 1137 PS_ASSERT_IMAGE_TYPE(image, PS_TYPE_F32, false); 1138 if (mask) { 1139 PS_ASSERT_IMAGE_NON_NULL(mask, false); 1140 PS_ASSERT_IMAGES_SIZE_EQUAL(mask, image, false); 1141 PS_ASSERT_IMAGE_TYPE(mask, PS_TYPE_MASK, false); 1142 } 1143 if (weight) { 1144 PS_ASSERT_IMAGE_NON_NULL(weight, false); 1145 PS_ASSERT_IMAGES_SIZE_EQUAL(weight, image, false); 1146 PS_ASSERT_IMAGE_TYPE(weight, PS_TYPE_F32, false); 1147 } 1148 1149 int numCols = image->numCols, numRows = image->numRows; // Image dimensions 1150 1151 int size = kernels->size; // Half-size of kernel 1152 for (int y = size; y < numRows - size; y++) { 1127 1153 for (int x = 0; x < 
size; x++) { 1128 markBlank( convImage, convMask, convWeight, x, y, blank);1154 markBlank(image, mask, weight, x, y, blank); 1129 1155 } 1130 1156 for (int x = numCols - size; x < numCols; x++) { 1131 markBlank( convImage, convMask, convWeight, x, y, blank);1157 markBlank(image, mask, weight, x, y, blank); 1132 1158 } 1133 1159 } 1134 1160 for (int y = 0; y < size; y++) { 1135 1161 for (int x = 0; x < numCols; x++) { 1136 markBlank( convImage, convMask, convWeight, x, y, blank);1162 markBlank(image, mask, weight, x, y, blank); 1137 1163 } 1138 1164 } 1139 1165 for (int y = numRows - size; y < numRows; y++) { 1140 1166 for (int x = 0; x < numCols; x++) { 1141 markBlank( convImage, convMask, convWeight, x, y, blank);1167 markBlank(image, mask, weight, x, y, blank); 1142 1168 } 1143 1169 } -
trunk/psModules/src/imcombine/pmSubtraction.h
r14330 r14455 6 6 * @author GLG, MHPCC 7 7 * 8 * @version $Revision: 1. 5$ $Name: not supported by cvs2svn $9 * @date $Date: 2007-0 7-20 01:53:52 $8 * @version $Revision: 1.6 $ $Name: not supported by cvs2svn $ 9 * @date $Date: 2007-08-09 20:25:52 $ 10 10 * Copyright 2004-207 Institute for Astronomy, University of Hawaii 11 11 */ … … 83 83 const psImage *subMask, ///< Subtraction mask (or NULL) 84 84 psMaskType blank, ///< Mask value for blank regions 85 const psRegion *region, ///< Region to convolve (or NULL) 85 86 const psVector *solution, ///< The solution vector 86 87 const pmSubtractionKernels *kernels ///< Kernel parameters 87 88 ); 88 89 90 /// Mark the non-convolved part of the image as blank 91 bool pmSubtractionBorder(psImage *image,///< Image 92 psImage *weight, ///< Weight map (or NULL) 93 psImage *mask, ///< Mask (or NULL) 94 const pmSubtractionKernels *kernels, ///< Kernel parameters (for kernel size) 95 psMaskType blank ///< Mask value for blank regions 96 ); 97 89 98 /// @} 90 99 #endif -
trunk/psModules/src/imcombine/pmSubtractionKernels.c
r14420 r14455 19 19 psFree(kernels->uStop); 20 20 psFree(kernels->vStop); 21 psFree(kernels->xOrder);22 psFree(kernels->yOrder);23 21 psFree(kernels->preCalc); 24 22 } … … 50 48 51 49 kernels->type = type; 50 kernels->num = numBasisFunctions; 52 51 kernels->u = psVectorAlloc(numBasisFunctions, PS_TYPE_S32); 53 52 kernels->v = psVectorAlloc(numBasisFunctions, PS_TYPE_S32); … … 55 54 kernels->uStop = NULL; 56 55 kernels->vStop = NULL; 57 kernels->xOrder = psVectorAlloc(numBasisFunctions, PS_TYPE_S32);58 kernels->yOrder = psVectorAlloc(numBasisFunctions, PS_TYPE_S32);59 56 kernels->subIndex = 0; 60 57 kernels->preCalc = NULL; … … 62 59 kernels->inner = 0; 63 60 kernels->spatialOrder = spatialOrder; 61 kernels->bgOrder = 0; 64 62 65 63 return kernels; … … 71 69 PS_ASSERT_INT_NONNEGATIVE(spatialOrder, NULL); 72 70 73 int num = PS_SQR(2 * size + 1) * (spatialOrder + 1) * (spatialOrder + 2) / 2; // Number of basis functions71 int num = PS_SQR(2 * size + 1); // Number of basis functions 74 72 75 73 pmSubtractionKernels *kernels = pmSubtractionKernelsAlloc(num, PM_SUBTRACTION_KERNEL_POIS, … … 81 79 // Generate a set of kernels for each (u,v) 82 80 for (int v = - size, index = 0; v <= size; v++) { 83 for (int u = - size; u <= size; u++) { 84 // Iterate over spatial order. This loop creates the terms for 85 // x^xOrder * y^yOrder such that (xOrder+yOrder) <= spatialOrder. 
86 for (int xOrder = 0; xOrder <= spatialOrder; xOrder++) { 87 for (int yOrder = 0; yOrder <= spatialOrder - xOrder; yOrder++, index++) { 88 kernels->u->data.S32[index] = u; 89 kernels->v->data.S32[index] = v; 90 kernels->xOrder->data.S32[index] = xOrder; 91 kernels->yOrder->data.S32[index] = yOrder; 92 93 psTrace("psModules.imcombine", 7, "Kernel %d: %d %d %d %d\n", index, 94 u, v, xOrder, yOrder); 95 } 96 } 97 } 98 } 99 100 kernels->subIndex = (num - (spatialOrder + 1) * (spatialOrder + 2) / 2) / 2; 81 for (int u = - size; u <= size; u++, index++) { 82 kernels->u->data.S32[index] = u; 83 kernels->v->data.S32[index] = v; 84 85 psTrace("psModules.imcombine", 7, "Kernel %d: %d %d\n", index, u, v); 86 } 87 } 88 89 kernels->subIndex = num / 2; 101 90 assert(kernels->u->data.S32[kernels->subIndex] == 0 && 102 kernels->v->data.S32[kernels->subIndex] == 0 && 103 kernels->xOrder->data.S32[kernels->subIndex] == 0 && 104 kernels->yOrder->data.S32[kernels->subIndex] == 0); 91 kernels->v->data.S32[kernels->subIndex] == 0); 105 92 106 93 return kernels; … … 127 114 num += (gaussOrder + 1) * (gaussOrder + 2) / 2; 128 115 } 129 num *= (spatialOrder + 1) * (spatialOrder + 2) / 2;130 116 131 117 pmSubtractionKernels *kernels = pmSubtractionKernelsAlloc(num, PM_SUBTRACTION_KERNEL_ISIS, … … 138 124 kernels->widths = psVectorAlloc(num, PS_TYPE_F32); 139 125 kernels->preCalc = psArrayAlloc(num); 126 psKernel *subtract = NULL; // Kernel to subtract to maintain flux scaling 140 127 141 128 // Set the kernel parameters 142 129 for (int i = 0, index = 0; i < numGaussians; i++) { 130 float norm = 1.0 / (M_2_PI * sqrtf(sigmas->data.F32[i])); // Normalisation for Gaussian 143 131 // Iterate over (u,v) order 144 132 for (int uOrder = 0; uOrder <= orders->data.S32[i]; uOrder++) { 145 for (int vOrder = 0; vOrder <= orders->data.S32[i] - uOrder; vOrder++ ) {133 for (int vOrder = 0; vOrder <= orders->data.S32[i] - uOrder; vOrder++, index++) { 146 134 147 135 // Set the pre-calculated kernel … … 
150 138 for (int v = -size; v <= size; v++) { 151 139 for (int u = -size; u <= size; u++) { 152 sum += preCalc->kernel[v][u] = power(u, uOrder) * power(v, vOrder) *140 sum += preCalc->kernel[v][u] = norm * power(u, uOrder) * power(v, vOrder) * 153 141 expf(-0.5 * (PS_SQR(u) + PS_SQR(v)) / PS_SQR(sigmas->data.F32[i])); 154 142 } 155 143 } 156 // Normalise sum of kernel component to unity 157 psBinaryOp(preCalc->image, preCalc->image, "*", psScalarAlloc(1.0/sum, PS_TYPE_F32)); 158 159 // Iterate over spatial order. This loop creates the terms for 160 // x^xOrder * y^yOrder such that (xOrder+yOrder) <= spatialOrder. 161 for (int xOrder = 0; xOrder <= spatialOrder; xOrder++) { 162 for (int yOrder = 0; yOrder <= spatialOrder - xOrder; yOrder++, index++) { 163 kernels->widths->data.F32[index] = sigmas->data.F32[i]; 164 kernels->u->data.S32[index] = uOrder; 165 kernels->v->data.S32[index] = vOrder; 166 kernels->xOrder->data.S32[index] = xOrder; 167 kernels->yOrder->data.S32[index] = yOrder; 168 kernels->preCalc->data[index] = psMemIncrRefCounter(preCalc); 169 170 psTrace("psModules.imcombine", 7, "Kernel %d: %f %d %d %d %d\n", index, 171 sigmas->data.F32[i], uOrder, vOrder, xOrder, yOrder); 144 if (index == 0) { 145 subtract = preCalc; 146 for (int v = -size; v <= size; v++) { 147 for (int u = -size; u <= size; u++) { 148 preCalc->kernel[v][u] /= sum; 149 } 150 } 151 } else if (uOrder % 2 == 0 && vOrder % 2 == 0) { 152 // Normalise sum of kernel component to unity for even functions 153 for (int v = -size; v <= size; v++) { 154 for (int u = -size; u <= size; u++) { 155 preCalc->kernel[v][u] = preCalc->kernel[v][u] / sum - subtract->kernel[v][u]; 156 } 172 157 } 173 158 } 174 159 175 psFree(preCalc); // Drop reference 160 kernels->widths->data.F32[index] = sigmas->data.F32[i]; 161 kernels->u->data.S32[index] = uOrder; 162 kernels->v->data.S32[index] = vOrder; 163 kernels->preCalc->data[index] = preCalc; 164 165 psTrace("psModules.imcombine", 7, "Kernel %d: %f %d %d\n", 
index, 166 sigmas->data.F32[i], uOrder, vOrder); 176 167 } 177 168 } 178 169 } 179 170 180 kernels->subIndex = 0; 181 assert(kernels->u->data.S32[kernels->subIndex] == 0 && 182 kernels->v->data.S32[kernels->subIndex] == 0 && 183 kernels->xOrder->data.S32[kernels->subIndex] == 0 && 184 kernels->yOrder->data.S32[kernels->subIndex] == 0); 171 kernels->subIndex = -1; 185 172 186 173 if (psTraceGetLevel("psModules.imcombine.kernel") >= 10) { … … 220 207 psTrace("psModules.imcombine", 3, "Inner: %d Outer: %d\n", numInner, numOuter); 221 208 222 int num = PS_SQR(2 * numTotal + 1) * 223 (spatialOrder + 1) * (spatialOrder + 2) / 2; // Number of basis functions 209 int num = PS_SQR(2 * numTotal + 1); // Number of basis functions 224 210 225 211 psTrace("psModules.imcombine", 3, "Number of basis functions: %d\n", num); … … 264 250 int uStop = u + widths->data.S32[numTotal + i]; // Width of pixel 265 251 266 for (int j = - numTotal; j <= numTotal; j++ ) {252 for (int j = - numTotal; j <= numTotal; j++, index++) { 267 253 int v = locations->data.S32[numTotal + j]; // Location of pixel 268 254 int vStop = v + widths->data.S32[numTotal + j]; // Width of pixel 269 255 270 // Iterate over spatial order. This loop creates the terms for 271 // x^xOrder * y^yOrder such that (xOrder+yOrder) <= spatialOrder. 
272 for (int xOrder = 0; xOrder <= spatialOrder; xOrder++) { 273 for (int yOrder = 0; yOrder <= spatialOrder - xOrder; yOrder++, index++) { 274 kernels->u->data.S32[index] = u; 275 kernels->v->data.S32[index] = v; 276 kernels->uStop->data.S32[index] = uStop; 277 kernels->vStop->data.S32[index] = vStop; 278 kernels->xOrder->data.S32[index] = xOrder; 279 kernels->yOrder->data.S32[index] = yOrder; 280 281 psTrace("psModules.imcombine", 7, "Kernel %d: %d %d %d %d %d %d\n", index, 282 u, uStop, v, vStop, xOrder, yOrder); 283 } 284 } 285 } 286 } 287 288 kernels->subIndex = (num - (spatialOrder + 1) * (spatialOrder + 2) / 2) / 2; 256 kernels->u->data.S32[index] = u; 257 kernels->v->data.S32[index] = v; 258 kernels->uStop->data.S32[index] = uStop; 259 kernels->vStop->data.S32[index] = vStop; 260 261 psTrace("psModules.imcombine", 7, "Kernel %d: %d %d %d %d\n", index, 262 u, uStop, v, vStop); 263 } 264 } 265 266 kernels->subIndex = num / 2; 289 267 assert(kernels->u->data.S32[kernels->subIndex] == 0 && 290 268 kernels->v->data.S32[kernels->subIndex] == 0 && 291 269 kernels->uStop->data.S32[kernels->subIndex] == 0 && 292 kernels->vStop->data.S32[kernels->subIndex] == 0 && 293 kernels->xOrder->data.S32[kernels->subIndex] == 0 && 294 kernels->yOrder->data.S32[kernels->subIndex] == 0); 270 kernels->vStop->data.S32[kernels->subIndex] == 0); 295 271 296 272 psFree(locations); … … 327 303 psTrace("psModules.imcombine", 3, "Inner: %d Outer: %d\n", numInner, numOuter); 328 304 329 int num = PS_SQR(2 * numTotal + 1) * 330 (spatialOrder + 1) * (spatialOrder + 2) / 2; // Number of basis functions 305 int num = PS_SQR(2 * numTotal + 1); // Number of basis functions 331 306 332 307 psTrace("psModules.imcombine", 3, "Number of basis functions: %d\n", num); … … 369 344 int u = start->data.S32[numTotal + i]; // Location of pixel 370 345 int uStop = stop->data.S32[numTotal + i]; // Width of pixel 371 for (int j = - numTotal; j <= numTotal; j++ ) {346 for (int j = - numTotal; j <= numTotal; 
j++, index++) { 372 347 int v = start->data.S32[numTotal + j]; // Location of pixel 373 348 int vStop = stop->data.S32[numTotal + j]; // Width of pixel 374 349 375 // Iterate over spatial order. This loop creates the terms for 376 // x^xOrder * y^yOrder such that (xOrder+yOrder) <= spatialOrder. 377 for (int xOrder = 0; xOrder <= spatialOrder; xOrder++) { 378 for (int yOrder = 0; yOrder <= spatialOrder - xOrder; yOrder++, index++) { 379 kernels->u->data.S32[index] = u; 380 kernels->v->data.S32[index] = v; 381 kernels->uStop->data.S32[index] = uStop; 382 kernels->vStop->data.S32[index] = vStop; 383 kernels->xOrder->data.S32[index] = xOrder; 384 kernels->yOrder->data.S32[index] = yOrder; 385 386 psTrace("psModules.imcombine", 7, "Kernel %d: %d %d %d %d %d %d\n", index, 387 u, uStop, v, vStop, xOrder, yOrder); 388 } 389 } 390 } 391 } 392 393 kernels->subIndex = (num - (spatialOrder + 1) * (spatialOrder + 2) / 2) / 2; 350 kernels->u->data.S32[index] = u; 351 kernels->v->data.S32[index] = v; 352 kernels->uStop->data.S32[index] = uStop; 353 kernels->vStop->data.S32[index] = vStop; 354 355 psTrace("psModules.imcombine", 7, "Kernel %d: %d %d %d %d\n", index, 356 u, uStop, v, vStop); 357 } 358 } 359 360 kernels->subIndex = num / 2; 394 361 assert(kernels->u->data.S32[kernels->subIndex] == 0 && 395 362 kernels->v->data.S32[kernels->subIndex] == 0 && 396 363 kernels->uStop->data.S32[kernels->subIndex] == 0 && 397 kernels->vStop->data.S32[kernels->subIndex] == 0 && 398 kernels->xOrder->data.S32[kernels->subIndex] == 0 && 399 kernels->yOrder->data.S32[kernels->subIndex] == 0); 364 kernels->vStop->data.S32[kernels->subIndex] == 0); 400 365 401 366 psFree(start); … … 429 394 430 395 int numInner = PS_SQR(2 * inner + 1); // Number of inner kernel elements 431 int numSpatial = (spatialOrder + 1) * (spatialOrder + 2) / 2; // Number of spatial variations of a kernel 432 433 int num = (numGaussianVars + numInner) * numSpatial; // Total number of basis functions 396 397 int num = 
numGaussianVars + numInner; // Total number of basis functions 434 398 435 399 pmSubtractionKernels *kernels = pmSubtractionKernelsAlloc(num, PM_SUBTRACTION_KERNEL_GUNK, … … 440 404 psFree(params); 441 405 442 kernels->widths = psVectorAlloc(numGaussianVars * numSpatial, PS_TYPE_F32); 443 kernels->preCalc = psArrayAlloc(numGaussianVars * numSpatial); 444 kernels->inner = numGaussianVars * numSpatial; 406 kernels->widths = psVectorAlloc(numGaussianVars, PS_TYPE_F32); 407 kernels->preCalc = psArrayAlloc(numGaussianVars); 408 kernels->inner = numGaussianVars; 409 psKernel *subtract = NULL; // Kernel to subtract to maintain flux scaling 445 410 446 411 // Set the Gaussian kernel parameters 447 412 for (int i = 0, index = 0; i < numGaussians; i++) { 413 float norm = 1.0 / (M_2_PI * sqrtf(sigmas->data.F32[i])); // Normalisation for Gaussian 448 414 // Iterate over (u,v) order 449 415 for (int uOrder = 0; uOrder <= orders->data.S32[i]; uOrder++) { 450 for (int vOrder = 0; vOrder <= orders->data.S32[i] - uOrder; vOrder++) { 451 452 416 for (int vOrder = 0; vOrder <= orders->data.S32[i] - uOrder; vOrder++, index++) { 453 417 // Set the pre-calculated kernel 454 418 psKernel *preCalc = psKernelAlloc(-size, size, -size, size); … … 456 420 for (int v = -size; v <= size; v++) { 457 421 for (int u = -size; u <= size; u++) { 458 sum += preCalc->kernel[v][u] = power(u, uOrder) * power(v, vOrder) *422 sum += preCalc->kernel[v][u] = norm * power(u, uOrder) * power(v, vOrder) * 459 423 expf(-0.5 * (PS_SQR(u) + PS_SQR(v)) / PS_SQR(sigmas->data.F32[i])); 460 424 } 461 425 } 462 // Normalise sum of kernel component to unity 463 psBinaryOp(preCalc->image, preCalc->image, "*", psScalarAlloc(1.0/sum, PS_TYPE_F32)); 464 465 // Iterate over spatial order. This loop creates the terms for 466 // x^xOrder * y^yOrder such that (xOrder+yOrder) <= spatialOrder. 
467 for (int xOrder = 0; xOrder <= spatialOrder; xOrder++) { 468 for (int yOrder = 0; yOrder <= spatialOrder - xOrder; yOrder++, index++) { 469 kernels->widths->data.F32[index] = sigmas->data.F32[i]; 470 kernels->u->data.S32[index] = uOrder; 471 kernels->v->data.S32[index] = vOrder; 472 kernels->xOrder->data.S32[index] = xOrder; 473 kernels->yOrder->data.S32[index] = yOrder; 474 kernels->preCalc->data[index] = psMemIncrRefCounter(preCalc); 475 476 psTrace("psModules.imcombine", 7, "Kernel %d: %f %d %d %d %d\n", index, 477 sigmas->data.F32[i], uOrder, vOrder, xOrder, yOrder); 426 if (index == 0) { 427 subtract = preCalc; 428 for (int v = -size; v <= size; v++) { 429 for (int u = -size; u <= size; u++) { 430 preCalc->kernel[v][u] /= sum; 431 } 432 } 433 } else if (uOrder % 2 == 0 && vOrder % 2 == 0) { 434 // Normalise sum of kernel component to unity for even functions 435 for (int v = -size; v <= size; v++) { 436 for (int u = -size; u <= size; u++) { 437 preCalc->kernel[v][u] = preCalc->kernel[v][u] / sum - subtract->kernel[v][u]; 438 } 478 439 } 479 440 } 480 441 481 psFree(preCalc); // Drop reference 442 kernels->widths->data.F32[index] = sigmas->data.F32[i]; 443 kernels->u->data.S32[index] = uOrder; 444 kernels->v->data.S32[index] = vOrder; 445 kernels->preCalc->data[index] = preCalc; 446 447 psTrace("psModules.imcombine", 7, "Kernel %d: %f %d %d\n", index, 448 sigmas->data.F32[i], uOrder, vOrder); 482 449 } 483 450 } 484 451 } 485 486 452 487 453 // Generate a grid set of kernels for each (u,v) 488 454 for (int v = - inner, index = kernels->inner; v <= inner; v++) { 489 for (int u = - inner; u <= inner; u++) { 490 // Iterate over spatial order. This loop creates the terms for 491 // x^xOrder * y^yOrder such that (xOrder+yOrder) <= spatialOrder. 
492 for (int xOrder = 0; xOrder <= spatialOrder; xOrder++) { 493 for (int yOrder = 0; yOrder <= spatialOrder - xOrder; yOrder++, index++) { 494 kernels->u->data.S32[index] = u; 495 kernels->v->data.S32[index] = v; 496 kernels->xOrder->data.S32[index] = xOrder; 497 kernels->yOrder->data.S32[index] = yOrder; 498 499 psTrace("psModules.imcombine", 7, "Kernel %d: %d %d %d %d\n", index, 500 u, v, xOrder, yOrder); 501 } 502 } 503 } 504 } 505 506 kernels->subIndex = kernels->inner + (numInner - 1) * numSpatial / 2; 455 for (int u = - inner; u <= inner; u++, index++) { 456 kernels->u->data.S32[index] = u; 457 kernels->v->data.S32[index] = v; 458 459 psTrace("psModules.imcombine", 7, "Kernel %d: %d %d\n", index, u, v); 460 } 461 } 462 463 kernels->subIndex = inner + num / 2; 507 464 assert(kernels->u->data.S32[kernels->subIndex] == 0 && 508 kernels->v->data.S32[kernels->subIndex] == 0 && 509 kernels->xOrder->data.S32[kernels->subIndex] == 0 && 510 kernels->yOrder->data.S32[kernels->subIndex] == 0); 465 kernels->v->data.S32[kernels->subIndex] == 0); 511 466 512 467 return kernels; … … 543 498 int numRings = numOuter + numInner; // Number of rings (not including the central pixel) 544 499 int numPoly = (ringsOrder + 1) * (ringsOrder + 2) / 2; // Number of polynomial variants of each ring 545 int numSpatial = (spatialOrder + 1) * (spatialOrder + 2) / 2; // Number of spatial variations of a kernel 546 547 int num = (numRings * numPoly + 1) * numSpatial; // Total number of basis functions 500 501 int num = numRings * numPoly + 1; // Total number of basis functions 548 502 549 503 pmSubtractionKernels *kernels = pmSubtractionKernelsAlloc(num, PM_SUBTRACTION_KERNEL_RINGS, … … 589 543 // Iterate over (u,v) order 590 544 for (int uOrder = 0; uOrder <= (i == 0 ? 0 : ringsOrder); uOrder++) { 591 for (int vOrder = 0; vOrder <= (i == 0 ? 0 : ringsOrder) - uOrder; vOrder++ ) {545 for (int vOrder = 0; vOrder <= (i == 0 ? 
0 : ringsOrder) - uOrder; vOrder++, index++) { 592 546 593 547 psArray *data = psArrayAlloc(3); // Container for data … … 604 558 } else { 605 559 int j = 0; // Index for data 560 double norm = 0.0; // Normalisation 606 561 for (int v = -size; v <= size; v++) { 607 562 int v2 = PS_SQR(v); // Square of v … … 616 571 uCoords->data.S32[j] = u; 617 572 vCoords->data.S32[j] = v; 618 poly->data.F32[j] = uPoly * vPoly;573 norm += poly->data.F32[j] = uPoly * vPoly; 619 574 620 575 psVectorExtend(uCoords, RINGS_BUFFER, 1); … … 629 584 } 630 585 } 586 // Normalise kernel component to unit sum 587 psBinaryOp(poly, poly, "*", psScalarAlloc(1.0 / norm, PS_TYPE_F32)); 588 631 589 } 632 590 633 591 psTrace("psModules.imcombine", 8, "%ld pixels in ring\n", uCoords->n); 634 592 635 // Iterate over spatial order. This loop creates the terms for 636 // x^xOrder * y^yOrder such that (xOrder+yOrder) <= spatialOrder. 637 for (int xOrder = 0; xOrder <= spatialOrder; xOrder++) { 638 for (int yOrder = 0; yOrder <= spatialOrder - xOrder; yOrder++, index++) { 639 kernels->preCalc->data[index] = psMemIncrRefCounter(data); 640 kernels->u->data.S32[index] = uOrder; 641 kernels->v->data.S32[index] = vOrder; 642 kernels->xOrder->data.S32[index] = xOrder; 643 kernels->yOrder->data.S32[index] = yOrder; 644 645 psTrace("psModules.imcombine", 7, "Kernel %d: %d %d %d %d %d\n", index, 646 i, uOrder, vOrder, xOrder, yOrder); 647 } 648 } 649 psFree(data); 593 kernels->preCalc->data[index] = data; 594 kernels->u->data.S32[index] = uOrder; 595 kernels->v->data.S32[index] = vOrder; 596 597 psTrace("psModules.imcombine", 7, "Kernel %d: %d %d %d\n", index, 598 i, uOrder, vOrder); 650 599 } 651 600 } … … 653 602 654 603 kernels->subIndex = 0; 655 assert(kernels->xOrder->data.S32[kernels->subIndex] == 0 &&656 kernels->yOrder->data.S32[kernels->subIndex] == 0);657 604 658 605 return kernels; -
trunk/psModules/src/imcombine/pmSubtractionKernels.h
r14363 r14455 17 17 typedef struct { 18 18 pmSubtractionKernelsType type; ///< Type of kernels --- allowing the use of multiple kernels 19 long num; ///< Number of kernel components (not including the spatial ones) 19 20 psVector *u, *v; ///< Offset (for POIS) or polynomial order (for ISIS) 20 21 psVector *widths; ///< Gaussian widths (ISIS) or ring radius (RINGS) 21 22 psVector *uStop, *vStop; ///< Width of kernel element (SPAM,FRIES only) 22 psVector *xOrder, *yOrder; ///< Spatial Polynomial order (for all)23 23 int subIndex; ///< Index of kernel to be subtracted (to maintain flux conservation) 24 24 psArray *preCalc; ///< Array of images containing pre-calculated kernel (for ISIS) … … 26 26 int inner; ///< The size of an inner region 27 27 int spatialOrder; ///< The spatial order of the kernels 28 int bgOrder; ///< The order for the background fitting 28 29 } pmSubtractionKernels; 29 30 -
trunk/psModules/src/imcombine/pmSubtractionStamps.c
r14342 r14455 42 42 43 43 psArray *pmSubtractionFindStamps(psArray *stamps, const psImage *image, const psImage *subMask, 44 float threshold, float spacing)44 const psRegion *region, float threshold, float spacing) 45 45 { 46 46 PS_ASSERT_IMAGE_NON_NULL(image, NULL); … … 52 52 } 53 53 PS_ASSERT_FLOAT_LARGER_THAN(spacing, 0.0, NULL); 54 if (region) { 55 if (psRegionIsNaN(*region)) { 56 psString string = psRegionToString(*region); 57 psError(PS_ERR_BAD_PARAMETER_VALUE, true, "Input region (%s) contains NAN values", string); 58 psFree(string); 59 return false; 60 } 61 if (region->x0 < 0 || region->x1 > image->numCols || 62 region->y0 < 0 || region->y1 > image->numRows) { 63 psString string = psRegionToString(*region); 64 psError(PS_ERR_BAD_PARAMETER_VALUE, true, "Input region (%s) does not fit in image (%dx%d)", 65 string, image->numCols, image->numRows); 66 psFree(string); 67 return false; 68 } 69 } 54 70 55 // Size of image 56 int numRows = image->numRows; 57 int numCols = image->numCols; 71 int numRows = image->numRows, numCols = image->numCols; // Size of image 72 73 // Get region in which to find stamps: [xMin:xMax,yMin:yMax] 74 int xMin = 0, xMax = numCols, yMin = 0, yMax = numRows; 75 if (region) { 76 xMin = PS_MAX(region->x0, xMin); 77 xMax = PS_MIN(region->x1, xMax); 78 yMin = PS_MAX(region->y0, yMin); 79 yMax = PS_MIN(region->y1, yMax); 80 } 81 int xSize = xMax - xMin, ySize = yMax - yMin; // Size of region of interest 58 82 59 83 // Number of stamps 60 int xNumStamps = numCols/ spacing + 1;61 int yNumStamps = numRows/ spacing + 1;84 int xNumStamps = xSize / spacing + 1; 85 int yNumStamps = ySize / spacing + 1; 62 86 63 87 int numFound = 0; // Number of stamps found … … 92 116 93 117 // Bounds of region to search for stamp 94 int y Min = j * numRows/ (yNumStamps + 1);95 int y Max = (j + 1) * numRows/ (yNumStamps + 1) - 1;96 int x Min = i * numCols/ (xNumStamps + 1);97 int x Max = (i + 1) * numCols/ (xNumStamps + 1) - 1;98 assert(y Max < image->numRows && 
xMax < image->numCols && yMin >= 0 && xMin>= 0);118 int yStart = yMin + j * ySize / (yNumStamps + 1); 119 int yStop = yMin + (j + 1) * ySize / (yNumStamps + 1) - 1; 120 int xStart = xMin + i * xSize / (xNumStamps + 1); 121 int xStop = xMin + (i + 1) * xSize / (xNumStamps + 1) - 1; 122 assert(yStop < numRows && xStop < numCols && yStart >= 0 && xStart >= 0); 99 123 100 for (int y = y Min; y <= yMax; y++) {101 for (int x = x Min; x <= xMax; x++) {124 for (int y = yStart; y <= yStop ; y++) { 125 for (int x = xStart; x <= xStop ; x++) { 102 126 if ((!subMask || !(subMask->data.PS_TYPE_MASK_DATA[y][x] & 103 127 (PM_SUBTRACTION_MASK_BORDER | PM_SUBTRACTION_MASK_FOOTPRINT | -
trunk/psModules/src/imcombine/pmSubtractionStamps.h
r14195 r14455 26 26 const psImage *image, ///< Image for which to find stamps 27 27 const psImage *mask, ///< Mask 28 const psRegion *region, ///< Region in which to search for stamps 28 29 float threshold, ///< Threshold for stamps in the image 29 30 float spacing ///< Rough spacing for stamps
Note: See TracChangeset for help on using the changeset viewer.
