void GradientsHdrCompression::hdrCompression(realType_t maxGradient_p, realType_t minGradient_p,
                                             realType_t expGradient_p, bool bRescale01_p,
                                             imageType_t &rResultImage_p) const
{
  TimeMessage startHdrCompression("Gradient Domain HDR Compression");
  imageType_t li;
  {
    TimeMessage startClipValue("Determine clip values for gradients");
    imageType_t dx(m_imageDx);
    imageType_t dy(m_imageDy);

    // now clip values
    realType_t minGradientX, maxGradientX;
    minMaxValImage(dx, minGradientX, maxGradientX);
    realType_t minGradientY, maxGradientY;
    minMaxValImage(dy, minGradientY, maxGradientY);
    realType_t minValue = Min(minGradientX, minGradientY);
    realType_t maxValue = Max(maxGradientX, maxGradientY);
    double rangeValue = Max(-minValue, maxValue);
    realType_t const clipRange = rangeValue*maxGradient_p;
    realType_t const zeroRange = rangeValue*minGradient_p;
    startClipValue.Stop();

    TimeMessage startClip("Clipping Gradients");
    clipImage(dx, -clipRange, clipRange);
    zeroRangeImage(dx, -zeroRange, zeroRange);
    clipImage(dy, -clipRange, clipRange);
    zeroRangeImage(dy, -zeroRange, zeroRange);
    startClip.Stop();

    if (expGradient_p != 1.0)
    {
      TimeMessage start("Pow() transformation of gradients");
      absPowImage(dx, expGradient_p);
      absPowImage(dy, expGradient_p);
    }

    TimeMessage startLaplace("Computing 2nd derivative");
    createLaplaceVonNeumannImage(dx, dy, li);
  }
  TimeMessage startSolve("Solving image");
  solveImage(li, rResultImage_p);
  startSolve.Stop();

  TimeMessage startRescale("Rescaling image");
  if (bRescale01_p)
  {
    rResultImage_p.Rescale(0.0, 1.0);
  }
  else
  {
    rResultImage_p.Rescale(m_dMinVal, m_dMaxVal);
  }
}
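
// Illustrative sketch (not part of the module): what the clip/zero step above amounts to for a single
// channel stored as a flat float buffer. The real clipImage()/zeroRangeImage() helpers operate on PCL
// images and are not shown here, so the loop below is an assumption about their per-pixel behavior:
// values outside [-clipRange, clipRange] are clamped, and values inside [-zeroRange, zeroRange] are set
// to zero, suppressing the strongest and the near-noise gradients before the Laplacian is solved.
#if 0
#include <algorithm>
#include <vector>

static void clipAndZeroGradients(std::vector<float>& gradient, float clipRange, float zeroRange)
{
   for (float& g : gradient)
   {
      g = std::min(std::max(g, -clipRange), clipRange); // clip to [-clipRange, clipRange]
      if (g > -zeroRange && g < zeroRange)              // zero out small gradients
         g = 0.0f;
   }
}
#endif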
// static
void GradientsMergeMosaic::mergeMosaic(imageListType_t const & rImageList_p,
                                       realType_t dBlackPoint_p,
                                       pcl_enum eType_p,
                                       int32 shrinkCount_p,
                                       int32 featherRadius_p,
                                       imageType_t &rResultImage_p,
                                       sumMaskImageType_t &rSumMaskImage_p)
{
  Assert(rImageList_p.Length()>=1);
  bool firstImage=true;
  int nCols=0, nRows=0, nChannels=0;            /// size and color space of first image
  imageType_t::color_space colorSpace;
  weightImageType_t countImageDx, countImageDy; /// number of pixels that contributed to sumImageDx,Dy in average mode
  imageType_t sumImageDx, sumImageDy;           /// combined gradients in x and y direction. Note: these gradients are *between* the pixels
                                                /// of the original image, so their size is one less than the original image in the direction of the derivative
  int nImages=0;                                /// number of images read
  const int enlargeSize=1;                      // number of pixels added at the border

  TimeMessage startMergeMosaic("Gradient Domain Merge Mosaic");
  TimeMessage startLoadImages("Loading images");
  for (std::size_t i=0; i<rImageList_p.Length(); ++i)
  {
    imageType_t currentImage;
    int imageIndex=0; // allow for multi-image files
    while (loadFile(rImageList_p[i], imageIndex, currentImage))
    {
      ++nImages;
      ++imageIndex;
      // expand image dimensions so there is a sufficient border for the morphological transform and convolution
      TimeMessage startEnlarge("creating border");
      currentImage.CropBy(enlargeSize, enlargeSize, enlargeSize, enlargeSize);
      startEnlarge.Stop();
      if (firstImage)
      {
        firstImage=false;
        // determine those parameters that must be shared by all images
        nCols=currentImage.Width();
        nRows=currentImage.Height();
        nChannels=currentImage.NumberOfChannels();
        colorSpace=currentImage.ColorSpace();
        // allocate necessary helper images
        rSumMaskImage_p.AllocateData(nCols, nRows);
        rSumMaskImage_p.ResetSelections();
        rSumMaskImage_p.Black();
        sumImageDx.AllocateData(nCols-1, nRows, nChannels, colorSpace);
        sumImageDx.ResetSelections();
        sumImageDx.Black();
        sumImageDy.AllocateData(nCols, nRows-1, nChannels, colorSpace);
        sumImageDy.ResetSelections();
        sumImageDy.Black();
        countImageDx.AllocateData(nCols-1, nRows);
        countImageDx.Black();
        countImageDy.AllocateData(nCols, nRows-1);
        countImageDy.Black();
      }
      else
      {
        // FIXME I wonder if I should check color space etc. as well...
        // check if properties of this image are identical to those of the first image
        if (nCols!=currentImage.Width())
        {
          throw Error("Current image width differs from first image width.");
        }
        else if (nRows!=currentImage.Height())
        {
          throw Error("Current image height differs from first image height.");
        }
        else if (nChannels!=currentImage.NumberOfChannels())
        {
          throw Error("Current image number of channels differs from first image number of channels.");
        }
      }
      TimeMessage startProcessImage("Processing Image"+String(nImages));
      mergeMosaicProcessImage(currentImage, dBlackPoint_p, eType_p, shrinkCount_p, featherRadius_p,
                              sumImageDx, sumImageDy, rSumMaskImage_p, countImageDx, countImageDy);
    }
  }
  startLoadImages.Stop();

  if (eType_p==GradientsMergeMosaicType::Average)
  {
    TimeMessage startAverage("Averaging images");
    averageImage(eType_p, countImageDx, sumImageDx);
    averageImage(eType_p, countImageDy, sumImageDy);
    // we do not need the count images any longer
    countImageDx.AllocateData(0,0);
    countImageDy.AllocateData(0,0);
  }
  // at this point:
  // sumImageDx: average or overlay of the x gradients of the images read
  // sumImageDy: average or overlay of the y gradients of the images read
  // rSumMaskImage_p: mask with different values for the different source images; 0 is background.
  //                  We use this later to inform the user, but it is not needed in the following process.

  TimeMessage startMerge("Merging Images");
  imageType_t laplaceImage;
  TimeMessage startLaplace("Creating Laplace image");
  createLaplaceVonNeumannImage(sumImageDx, sumImageDy, laplaceImage);
  startLaplace.Stop();
  TimeMessage startSolve("Solving Laplace");
  solveImage(laplaceImage, rResultImage_p);
  startSolve.Stop();
  startMerge.Stop();
  rResultImage_p.ResetSelections();
#if 0
  // for debugging laplaceImage
  // rResultImage_p.Assign(laplaceImage);
  rResultImage_p.Assign(sumImageDx);
#else
  TimeMessage startEnlarge("shrinking border");
  rResultImage_p.CropBy(-enlargeSize, -enlargeSize, -enlargeSize, -enlargeSize);
  rSumMaskImage_p.CropBy(-enlargeSize, -enlargeSize, -enlargeSize, -enlargeSize);
  startEnlarge.Stop();
#endif
  TimeMessage startRescale("Rescaling Result");
  rResultImage_p.Rescale(); // FIXME something more clever?
  startRescale.Stop();
}
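
// Illustrative sketch (not part of the module): the kind of Laplacian image createLaplaceVonNeumannImage()
// is expected to produce from the two gradient fields. The actual helper works on PCL images and is not
// shown here; the sketch assumes dx has size (w-1) x h and dy has size w x (h-1) (gradients living
// *between* pixels, as noted above), and that gradients across the image border are taken as zero
// (von Neumann boundary condition). solveImage() then recovers an image whose gradients best match dx/dy.
#if 0
#include <cstddef>
#include <vector>

static std::vector<float> laplaceFromGradients(const std::vector<float>& dx,  // (w-1) x h, row-major
                                               const std::vector<float>& dy,  // w x (h-1), row-major
                                               int w, int h)
{
   std::vector<float> laplace(static_cast<std::size_t>(w)*h, 0.0f);
   for (int y = 0; y < h; ++y)
      for (int x = 0; x < w; ++x)
      {
         float v = 0.0f;
         // divergence of the gradient field: differences of dx and dy,
         // with out-of-range (border) gradients treated as zero
         if (x < w - 1) v += dx[y*(w - 1) + x];
         if (x > 0)     v -= dx[y*(w - 1) + (x - 1)];
         if (y < h - 1) v += dy[y*w + x];
         if (y > 0)     v -= dy[(y - 1)*w + x];
         laplace[y*w + x] = v;
      }
   return laplace;
}
#endif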
// static
void GradientsHdrComposition::hdrComposition(imageListType_t const & rImageList_p,
                                             bool bKeepLog_p,
                                             realType_t dBias_p,
                                             imageType_t &rResultImage_p,
                                             numImageType_t &rDxImage_p,
                                             numImageType_t &rDyImage_p)
{
  Assert(rImageList_p.Length()>=1);
  bool firstImage=true;
  int nCols=0, nRows=0, nChannels=0;  /// size and color space of first image
  imageType_t::color_space colorSpace;
  imageType_t sumImageDx, sumImageDy; /// combined gradients in x and y direction. Note: these gradients are *between* the pixels
                                      /// of the original image, so their size is one less than the original image in the direction of the derivative
  int nImages=0;                      /// number of images read
  const int enlargeSize=1;            // number of pixels added at the border

  TimeMessage startHdrComposition("Gradient Domain Hdr Composition");
  TimeMessage startLoadImages("Loading images");
  for (std::size_t i=0; i<rImageList_p.Length(); ++i)
  {
    imageType_t currentImage;
    int imageIndex=0; // allow for multi-image files
    while (loadFile(rImageList_p[i], imageIndex, currentImage))
    {
      ++nImages;
      ++imageIndex;
      // expand image dimensions so there is a sufficient border for the morphological transform and convolution
      TimeMessage startEnlarge("creating border");
      currentImage.CropBy(enlargeSize, enlargeSize, enlargeSize, enlargeSize);
      startEnlarge.Stop();
      if (firstImage)
      {
        firstImage=false;
        // determine those parameters that must be shared by all images
        nCols=currentImage.Width();
        nRows=currentImage.Height();
        nChannels=currentImage.NumberOfChannels();
        colorSpace=currentImage.ColorSpace();
        // allocate necessary helper images
        rDxImage_p.AllocateData(nCols, nRows, nChannels, colorSpace);
        rDxImage_p.ResetSelections();
        rDxImage_p.Black();
        rDyImage_p.AllocateData(nCols, nRows, nChannels, colorSpace);
        rDyImage_p.ResetSelections();
        rDyImage_p.Black();
        sumImageDx.AllocateData(nCols-1, nRows, nChannels, colorSpace);
        sumImageDx.ResetSelections();
        sumImageDx.Black();
        sumImageDy.AllocateData(nCols, nRows-1, nChannels, colorSpace);
        sumImageDy.ResetSelections();
        sumImageDy.Black();
      }
      else
      {
        // FIXME I wonder if I should check color space etc. as well...
        // check if properties of this image are identical to those of the first image
        if (nCols!=currentImage.Width())
        {
          throw Error("Current image width differs from first image width.");
        }
        else if (nRows!=currentImage.Height())
        {
          throw Error("Current image height differs from first image height.");
        }
        else if (nChannels!=currentImage.NumberOfChannels())
        {
          throw Error("Current image number of channels differs from first image number of channels.");
        }
      }
      TimeMessage startProcessImage("Processing Image"+String(nImages));
      hdrCompositionProcessImage(currentImage, nImages, dBias_p, 0.0, 1,
                                 sumImageDx, sumImageDy, rDxImage_p, rDyImage_p);
    }
  }
  startLoadImages.Stop();
  // at this point:
  // sumImageDx: max log gradient of the images read in x direction
  // sumImageDy: max log gradient of the images read in y direction
  // rDxImage_p, rDyImage_p: mask-like images with different values for the different source images; 0 is background.
  //                         We use this later to inform the user, but it is not needed in the following process.

  TimeMessage startHdr("HDR Combining Images");
  imageType_t laplaceImage;
  TimeMessage startLaplace("Creating Laplace image");
  // eliminate gradients that come from singularities, i.e. <=0 pixels
  clipGradients(sumImageDx);
  clipGradients(sumImageDy);
  createLaplaceVonNeumannImage(sumImageDx, sumImageDy, laplaceImage);
  startLaplace.Stop();
  TimeMessage startSolve("Solving Laplace");
  solveImage(laplaceImage, rResultImage_p);
  startSolve.Stop();
  rResultImage_p.ResetSelections();
  if (!bKeepLog_p)
  {
    TimeMessage startExp("Performing Exp()");
    realType_t dLogBias=std::log(1.0e-7);
    rResultImage_p.Rescale(dLogBias, 0.0); // assumes result range is 1e-7..1
    expImage(rResultImage_p, rResultImage_p);
    startExp.Stop();
  }
  startHdr.Stop();
#if 0
  // for debugging laplaceImage
  // rResultImage_p.Assign(laplaceImage);
  rResultImage_p.Assign(sumImageDx);
#else
  TimeMessage startEnlarge("shrinking border");
  rResultImage_p.CropBy(-enlargeSize, -enlargeSize, -enlargeSize, -enlargeSize);
  rDxImage_p.CropBy(-enlargeSize, -enlargeSize, -enlargeSize, -enlargeSize);
  rDyImage_p.CropBy(-enlargeSize, -enlargeSize, -enlargeSize, -enlargeSize);
  startEnlarge.Stop();
#endif
  TimeMessage startRescale("Rescaling Result");
  rResultImage_p.Rescale(); // FIXME something more clever?
  startRescale.Stop();
}
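
// Illustrative sketch (not part of the module): the back-transform applied above when bKeepLog_p is false.
// The solver returns a log-domain image with arbitrary offset and scale; it is rescaled to [log(1e-7), 0]
// and exponentiated, so the values end up in roughly [1e-7, 1] before the final Rescale(). expImage() is
// the module's own helper; the loop below is an assumption about its per-pixel behavior, written for a
// flat float buffer with known minimum and maximum.
#if 0
#include <cmath>
#include <vector>

static void expFromLogDomain(std::vector<float>& image, float minVal, float maxVal)
{
   const float logBias = std::log(1.0e-7f);
   const float range   = maxVal - minVal; // assumed nonzero
   for (float& v : image)
   {
      const float logValue = logBias*(1.0f - (v - minVal)/range); // rescale to [log(1e-7), 0]
      v = std::exp(logValue);                                     // back to linear, in [1e-7, 1]
   }
}
#endif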