//static void GradientsMergeMosaic::averageImage(pcl_enum eType_p, weightImageType_t const &rCountImage_p, imageType_t &rImage_p) { int const nCols=rImage_p.Width(); int const nRows=rImage_p.Height(); int const nChannels=rImage_p.NumberOfChannels(); switch(eType_p) { // FIXME sure there must be a faster way to do this via masked ops?!? case GradientsMergeMosaicType::Overlay: // do nothing break; case GradientsMergeMosaicType::Average: // FIXME use builtin PCL for this for(int channel=0;channel<nChannels;++channel){ for(int row=0;row<nRows;++row){ for(int col=0;col<nCols;++col){ if(rCountImage_p.Pixel(col,row)>0.0){ rImage_p.Pixel(col,row,channel)/=static_cast<realType_t>(rCountImage_p.Pixel(col,row)); } } } } break; default: Assert(false); } }
//static void GradientsMergeMosaic::featherMask(weightImageType_t &rMask_p, int32 featherRadius_p) { #if 0 // create test pattern int const nCols=rMask_p.Width(); int const nRows=rMask_p.Height(); int const nChannels=rMask_p.NumberOfChannels(); rMask_p.Black(); for(int channel=0;channel<nChannels;++channel){ for(int col=nCols*0.25;col<nCols*0.75;++col){ for(int row=0;row<nRows;row++){ rMask_p.Pixel(col,row,channel)=1.0; } } } #endif if(featherRadius_p<=0){ // nothing to do return; } // first, I need to shrink the mask, then convolve TimeMessage startFeatherErode("Erode mask for feathering"); // FIXME this is quite slow for featherRadius_p>20. // We would need a separable erode, promised by Juan... // erodeMask(rMask_p, featherRadius_p); // FIXME this is quick workaround, using separable convolution instead erodeMaskConvolve(rMask_p, featherRadius_p); startFeatherErode.Stop(); TimeMessage startFeatherConvolve("ConvolveMask for feathering"); convolveMask(rMask_p, featherRadius_p); startFeatherConvolve.Stop(); }
//static void GradientsMergeMosaic::erodeMaskConvolve(weightImageType_t & rMask_p,int32 shrinkCount_p) { int sideLength=1+2*shrinkCount_p; int nElements=sideLength*sideLength; SeparableFilter filter( sideLength,1.0/nElements); SeparableConvolution convolve(filter); rMask_p.ResetSelections(); convolve >> rMask_p; rMask_p.Binarize(0.999999); }
//static void GradientsMergeMosaic::addToMask(weightImageType_t const & rMask_p, pcl_enum eType_p, sumMaskImageType_t &rSumMaskImage_p) { int const nCols=rMask_p.Width(); int const nRows=rMask_p.Height(); switch(eType_p) { // FIXME sure there must be a faster way to do this via masked ops?!? case GradientsMergeMosaicType::Overlay: for(int row=0;row<nRows;++row){ for(int col=0;col<nCols;++col){ rSumMaskImage_p.Pixel(col,row)*=2.0; if(rMask_p.Pixel(col,row)>0.0) { // rSumMaskImage_p.Pixel(col,row)=rMask_p.Pixel(col,row); rSumMaskImage_p.Pixel(col,row)=rMask_p.Pixel(col,row)+(1.0-rMask_p.Pixel(col,row))*rSumMaskImage_p.Pixel(col,row); } } } break; case GradientsMergeMosaicType::Average: for(int row=0;row<nRows;++row){ for(int col=0;col<nCols;++col){ rSumMaskImage_p.Pixel(col,row)*=2.0; if(rMask_p.Pixel(col,row)>0.0) { rSumMaskImage_p.Pixel(col,row)+=rMask_p.Pixel(col,row); } } } break; default: Assert(false); } }
//static void GradientsBase::binarizeImage(imageType_t const & currentImage_p, realType_t dBlackPoint_p, weightImageType_t & rMaskImage_p) { #if 0 imageType_t image(currentImage_p); image.ResetSelections(); image.Binarize(dBlackPoint_p); // first channel image.ResetSelections(); image.SelectChannel(0); rMaskImage_p.ResetSelections(); rMaskImage_p.Assign(image); // compress into one channel for(int i=1;i<image.NumberOfChannels();++i){ image.SelectChannel(i); rMaskImage_p.Max(image); } #else int const nCols=currentImage_p.Width(); int const nRows=currentImage_p.Height(); int const nChannels=currentImage_p.NumberOfChannels(); rMaskImage_p.AllocateData(nCols,nRows); rMaskImage_p.Black(); for(int channel=0;channel<nChannels;++channel){ for(int row=0;row<nRows;++row){ for(int col=0;col<nCols;++col){ if(currentImage_p.Pixel(col,row,channel)>dBlackPoint_p) { rMaskImage_p.Pixel(col,row)=1.0; } } } } #endif }
///static void GradientsMergeMosaic::addBorder(weightImageType_t const & rFullMask_p, weightImageType_t &rShrinkedMask_p) { int const nCols=rShrinkedMask_p.Width(); int const nRows=rShrinkedMask_p.Height(); int const nChannels=rShrinkedMask_p.NumberOfChannels(); Assert(nCols==rFullMask_p.Width()); Assert(nRows==rFullMask_p.Height()); Assert(nChannels==rFullMask_p.NumberOfChannels()); // multiple channels really not necessary, but who cares... for(int channel=0;channel<nChannels;++channel){ for(int col=0;col<nCols;++col){ for(int row=0;row<nRows;row++){ if(rFullMask_p.Pixel(col,row,channel)!=0.0 && rShrinkedMask_p.Pixel(col,row,channel)==0){ rShrinkedMask_p.Pixel(col,row,channel)= -1.0; } } } } }
// static
// Merge one aligned mosaic frame into the running gradient-domain data:
// build a binary mask from the black point, shrink and feather it, re-mark
// the eroded rim as border, then fold the mask and the frame's gradients
// into the accumulated sums.
//
// rImage_p:        aligned input frame.
// dBlackPoint_p:   channel values above this count as foreground (>= 0).
// eType_p:         GradientsMergeMosaicType::Overlay or ::Average.
// shrinkCount_p:   erosion distance for the mask, in pixels (>= 0).
// featherRadius_p: feathering radius for the mask edge, in pixels (>= 0).
// rSumImageDx_p:   accumulated x gradients; one column narrower than rImage_p.
// rSumImageDy_p:   accumulated y gradients; one row shorter than rImage_p.
// rSumMaskImage_p: accumulated combined mask, single channel.
// rCountImageDx_p, rCountImageDy_p: per-gradient-pixel weight sums; their
//                  dimensions are only checked (and presumably only used)
//                  when eType_p is Average.
void GradientsMergeMosaic::mergeMosaicProcessImage(imageType_t const & rImage_p, realType_t dBlackPoint_p,
                                                   pcl_enum eType_p, int32 shrinkCount_p, int32 featherRadius_p,
                                                   imageType_t &rSumImageDx_p, imageType_t &rSumImageDy_p,
                                                   sumMaskImageType_t &rSumMaskImage_p,
                                                   weightImageType_t &rCountImageDx_p, weightImageType_t &rCountImageDy_p)
{
#ifdef DEBUG
  // Dimensions are only needed by the Assert() checks below; the Asserts
  // referencing them must compile away when DEBUG is not defined.
  int const nCols=rImage_p.Width();
  int const nRows=rImage_p.Height();
  int const nChannels=rImage_p.NumberOfChannels();
#endif
  // Gradient images are one pixel smaller along their difference axis.
  Assert(nCols==rSumImageDx_p.Width()+1);
  Assert(nRows==rSumImageDx_p.Height());
  Assert(nChannels==rSumImageDx_p.NumberOfChannels());
  Assert(nCols==rSumImageDy_p.Width());
  Assert(nRows==rSumImageDy_p.Height()+1);
  Assert(nChannels==rSumImageDy_p.NumberOfChannels());
  Assert(nCols==rSumMaskImage_p.Width());
  Assert(nRows==rSumMaskImage_p.Height());
  Assert(1==rSumMaskImage_p.NumberOfChannels());
  // Count image dimensions only matter in Average mode.
  Assert(eType_p!=GradientsMergeMosaicType::Average || nCols==rCountImageDx_p.Width()+1);
  Assert(eType_p!=GradientsMergeMosaicType::Average || nRows==rCountImageDx_p.Height());
  Assert(eType_p!=GradientsMergeMosaicType::Average || 1==rCountImageDx_p.NumberOfChannels());
  Assert(eType_p!=GradientsMergeMosaicType::Average || nCols==rCountImageDy_p.Width());
  Assert(eType_p!=GradientsMergeMosaicType::Average || nRows==rCountImageDy_p.Height()+1);
  Assert(eType_p!=GradientsMergeMosaicType::Average || 1==rCountImageDy_p.NumberOfChannels());
  Assert(shrinkCount_p>=0);
  Assert(featherRadius_p>=0);
  Assert(dBlackPoint_p>=0.0);

  weightImageType_t maskImage;
  // NOTE(review): startAddImage is never explicitly Stop()ped; presumably
  // TimeMessage reports on destruction at function exit — confirm.
  TimeMessage startAddImage("Adding image to data");
  TimeMessage startBinarize("Binarize Image");
  binarizeImage(rImage_p,dBlackPoint_p,maskImage);
  // save this for border computation later
  weightImageType_t fullMask(maskImage);
  startBinarize.Stop();
  // We shrink the mask because images coming out of StarAlign usually
  // contain aliased pixels at their edges; these must not be used during
  // the merge.
  TimeMessage startShrink("Shrinking mask");
  erodeMask(maskImage,shrinkCount_p);
  startShrink.Stop();
  TimeMessage startFeather("Feathering mask");
  featherMask(maskImage,featherRadius_p);
  startFeather.Stop();
  // Stamp pixels removed by shrinking/feathering with the -1.0 border marker.
  TimeMessage startBorder("Computing border");
  addBorder(fullMask,maskImage);
  fullMask.AllocateData(0,0); // save memory
  startBorder.Stop();
  TimeMessage startSumMask("Creating combined mask");
  addToMask(maskImage,eType_p,rSumMaskImage_p);
  startSumMask.Stop();
  TimeMessage startAddGradients("Adding gradients data");
  addToImage(rImage_p,eType_p,maskImage,rSumImageDx_p,rSumImageDy_p,rCountImageDx_p, rCountImageDy_p);
  startAddGradients.Stop();
}
// static
// Add the finite-difference gradients of rImage_p to the running sums,
// weighted by the feathered mask rMask_p. A gradient pixel is used only
// when the mask is positive at BOTH pixels forming the difference; negative
// mask values mark the image border (see addBorder) and are copied in only
// where nothing has been accumulated yet, so isolated borders still appear.
// In Average mode the weighted gradients and weights are summed (presumably
// normalized later, see averageImage); in Overlay mode the first frame is
// copied and later frames are blended over it by mask weight.
void GradientsMergeMosaic::addToImage(imageType_t const & rImage_p,pcl_enum eType_p,
                                      weightImageType_t const & rMask_p,
                                      imageType_t &rSumImageDx_p,imageType_t &rSumImageDy_p,
                                      weightImageType_t &rCountImageDx_p, weightImageType_t &rCountImageDy_p)
{
  int const nCols=rImage_p.Width();
  int const nRows=rImage_p.Height();
  int const nChannels=rImage_p.NumberOfChannels();
  imageType_t dxImage, dyImage;
  const double zeroLimit=0.0; /// limit for weight that is considered zero
  TimeMessage startDx("Creating Dx");
  createDxImage(rImage_p,dxImage);
  startDx.Stop();
  TimeMessage startAddDx("Adding Dx");
  // transfer useful dx pixels
  // FIXME this is a relatively slow loop. Think about making it faster
  for(int row=0;row<nRows;++row){
    for(int col=0;col<nCols-1;++col){
      if(rMask_p.Pixel(col,row)>zeroLimit && rMask_p.Pixel(col+1,row)>zeroLimit) {
        // we are inside of image: weight is the mean mask value of the two
        // pixels that form this horizontal difference
        realType_t weight=(rMask_p.Pixel(col,row)+rMask_p.Pixel(col+1,row))/2.0;
        if(eType_p==GradientsMergeMosaicType::Average){
          if(rCountImageDx_p.Pixel(col,row)<=0.0){
            // first foreground pixel on this location
            for(int channel=0;channel<nChannels;++channel){
              rSumImageDx_p.Pixel(col,row,channel)=dxImage.Pixel(col,row,channel)*weight;
            }
          } else {
            // there have been other pixels: accumulate the weighted sum
            for(int channel=0;channel<nChannels;++channel){
              rSumImageDx_p.Pixel(col,row,channel)+=dxImage.Pixel(col,row,channel)*weight;
            }
          }
          rCountImageDx_p.Pixel(col,row)+=weight;
        } else {
          // type overlay, last gradient wins
          if(rCountImageDx_p.Pixel(col,row)<=0.0){
            // first foreground pixel on this location
            for(int channel=0;channel<nChannels;++channel){
              rSumImageDx_p.Pixel(col,row,channel)=dxImage.Pixel(col,row,channel);
            }
            rCountImageDx_p.Pixel(col,row)=1.0; //mark as used
          } else {
            // there have been other pixels: blend the new gradient over the
            // old one by mask weight
            for(int channel=0;channel<nChannels;++channel){
              rSumImageDx_p.Pixel(col,row,channel)=dxImage.Pixel(col,row,channel)*weight+rSumImageDx_p.Pixel(col,row,channel)*(1.0-weight);
            }
          }
        } //if type
      } else if(rCountImageDx_p.Pixel(col,row)==0.0 && (rMask_p.Pixel(col,row)<0.0 || rMask_p.Pixel(col+1,row)<0.0)) {
        // we are at border of image and dont have values there. Just copy in
        // gradient so if nothing else comes in, we have at least the border
        for(int channel=0;channel<nChannels;++channel){
          rSumImageDx_p.Pixel(col,row,channel)=dxImage.Pixel(col,row,channel);
        }
        // add if first should win. Otherwise last will win
        //rCountImageDx_p.Pixel(col,row)=-1.0; // mark as background already occupied
      }
    } //for col
  } //for row
  dxImage.AllocateData(0,0); //save some memory
  startAddDx.Stop();

  TimeMessage startDy("Creating Dy");
  // transfer useful dy pixels, same scheme as dx but for vertical differences
  createDyImage(rImage_p,dyImage);
  startDy.Stop();
  TimeMessage startAddDy("Adding Dy");
  for(int row=0;row<nRows-1;++row){
    for(int col=0;col<nCols;++col){
      if(rMask_p.Pixel(col,row)>zeroLimit && rMask_p.Pixel(col,row+1)>zeroLimit) {
        realType_t weight=(rMask_p.Pixel(col,row)+rMask_p.Pixel(col,row+1))/2.0;
        if(eType_p==GradientsMergeMosaicType::Average){
          // type average and inside
          if(rCountImageDy_p.Pixel(col,row)<=0.0){
            // first foreground pixel on this location
            for(int channel=0;channel<nChannels;++channel){
              rSumImageDy_p.Pixel(col,row,channel)=dyImage.Pixel(col,row,channel)*weight;
            }
          } else {
            // we already were there: accumulate the weighted sum
            for(int channel=0;channel<nChannels;++channel){
              rSumImageDy_p.Pixel(col,row,channel)+=dyImage.Pixel(col,row,channel)*weight;
            }
          }
          rCountImageDy_p.Pixel(col,row)+=weight;
        } else {
          // type overlay and inside, last gradient wins
          if(rCountImageDy_p.Pixel(col,row)<=0.0){
            // first foreground pixel on this location
            for(int channel=0;channel<nChannels;++channel){
              rSumImageDy_p.Pixel(col,row,channel)=dyImage.Pixel(col,row,channel);
            }
            rCountImageDy_p.Pixel(col,row)=1.0; //mark as used
          } else {
            // we have been there, merge by mask weight
            for(int channel=0;channel<nChannels;++channel){
              rSumImageDy_p.Pixel(col,row,channel)=dyImage.Pixel(col,row,channel)*weight+rSumImageDy_p.Pixel(col,row,channel)*(1.0-weight);
            }
          }
        } //if type
      } else if(rCountImageDy_p.Pixel(col,row)==0.0 && (rMask_p.Pixel(col,row)<0.0 || rMask_p.Pixel(col,row+1)<0.0)){
        // we are outside of image and dont have values there. Just copy in gradient
        for(int channel=0;channel<nChannels;++channel){
          rSumImageDy_p.Pixel(col,row,channel)=dyImage.Pixel(col,row,channel);
        }
        // add if first should win. Otherwise last will win
        //rCountImageDy_p.Pixel(col,row)=-1.0;
      }
    } //for col
  } //for row
  startAddDy.Stop();
}
//static void GradientsHdrComposition::addToImage(imageType_t const & rImage_p, int imageNum_p, weightImageType_t const & rMask_p, imageType_t &rSumImageDx_p,imageType_t &rSumImageDy_p, numImageType_t &rDxImage_p, numImageType_t &rDyImage_p) { int const nCols=rImage_p.Width(); int const nRows=rImage_p.Height(); int const nChannels=rImage_p.NumberOfChannels(); Assert(nCols==rSumImageDx_p.Width()+1); Assert(nRows==rSumImageDx_p.Height()); Assert(nChannels==rSumImageDx_p.NumberOfChannels()); Assert(nCols==rSumImageDy_p.Width()); Assert(nRows==rSumImageDy_p.Height()+1); Assert(nChannels==rSumImageDy_p.NumberOfChannels()); Assert(nCols==rDxImage_p.Width()); Assert(nRows==rDxImage_p.Height()); Assert(nChannels==rDxImage_p.NumberOfChannels()); Assert(nCols==rDyImage_p.Width()); Assert(nRows==rDyImage_p.Height()); Assert(nChannels==rDyImage_p.NumberOfChannels()); imageType_t dxImage, dyImage; const double zeroLimit=0.0; /// limit for weight that is considered zero // handle Dx TimeMessage startDx("Creating Dx"); createDxImage(rImage_p,dxImage); startDx.Stop(); TimeMessage startAddDx("Adding Dx to gradients"); // transfer useful dx pixels for(int row=0;row<nRows;++row){ for(int col=0;col<nCols-1;++col){ if(rMask_p.Pixel(col,row)>zeroLimit && rMask_p.Pixel(col+1,row)>zeroLimit) { // we are inside of image, and have useful gradient there for(int channel=0;channel<nChannels;++channel){ realType_t currentVal=dxImage.Pixel(col,row,channel); realType_t sumVal=rSumImageDx_p.Pixel(col,row,channel); if(std::abs(currentVal)>std::abs(sumVal)){ rSumImageDx_p.Pixel(col,row,channel)=currentVal; rDxImage_p.Pixel(col,row,channel)=imageNum_p; } //if abs } // for chan // FIXME may need to add border handling } //if inside } //for col } //for row dxImage.AllocateData(0,0); //save some memory startAddDx.Stop(); // handle Dy just as dx TimeMessage startDy("Creating Dy"); createDyImage(rImage_p,dyImage); startDy.Stop(); TimeMessage startAddDy("Adding Dxy to gradients"); // transfer useful dy 
pixels for(int row=0;row<nRows-1;++row){ for(int col=0;col<nCols;++col){ if(rMask_p.Pixel(col,row)>zeroLimit && rMask_p.Pixel(col,row+1)>zeroLimit) { for(int channel=0;channel<nChannels;++channel){ // we are inside of image, and have useful gradient there realType_t currentVal=dyImage.Pixel(col,row,channel); realType_t sumVal=rSumImageDy_p.Pixel(col,row,channel); if(std::abs(currentVal)>std::abs(sumVal)){ rSumImageDy_p.Pixel(col,row,channel)=currentVal; rDyImage_p.Pixel(col,row,channel)=imageNum_p; } //if abs() } // for chan // FIXME may need to add border handling } // if inside } //for col } //for row startAddDy.Stop(); }