void
FutureBucket::makeLive(Application& app)
{
    checkState();
    assert(!isLive());
    assert(hasHashes());
    auto& bm = app.getBucketManager();
    if (hasOutputHash())
    {
        // The merge already completed: just fetch the output bucket by hash.
        setLiveOutput(bm.getBucketByHash(hexToBin256(getOutputHash())));
    }
    else
    {
        // Only the input hashes are known: look up the input buckets and
        // restart the merge.
        assert(mState == FB_HASH_INPUTS);
        mInputCurrBucket = bm.getBucketByHash(hexToBin256(mInputCurrBucketHash));
        mInputSnapBucket = bm.getBucketByHash(hexToBin256(mInputSnapBucketHash));
        assert(mInputShadowBuckets.empty());
        for (auto const& h : mInputShadowBucketHashes)
        {
            auto b = bm.getBucketByHash(hexToBin256(h));
            assert(b);
            CLOG(DEBUG, "Bucket") << "Reconstituting shadow " << h;
            mInputShadowBuckets.push_back(b);
        }
        mState = FB_LIVE_INPUTS;
        startMerge(app);
        assert(isLive());
    }
}
FutureBucket::FutureBucket(Application& app,
                           std::shared_ptr<Bucket> const& curr,
                           std::shared_ptr<Bucket> const& snap,
                           std::vector<std::shared_ptr<Bucket>> const& shadows)
    : mState(FB_LIVE_INPUTS)
    , mInputCurrBucket(curr)
    , mInputSnapBucket(snap)
    , mInputShadowBuckets(shadows)
{
    // Constructed with a bunch of inputs, _immediately_ commence merging
    // them; there's no valid state for have-inputs-but-not-merging, the
    // presence of inputs implies merging, and vice-versa.
    assert(curr);
    assert(snap);
    mInputCurrBucketHash = binToHex(curr->getHash());
    mInputSnapBucketHash = binToHex(snap->getHash());
    for (auto const& b : mInputShadowBuckets)
    {
        mInputShadowBucketHashes.push_back(binToHex(b->getHash()));
    }
    startMerge(app);
}
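// Illustrative sketch only (not stellar-core code): a toy class showing the
// invariant described in the constructor comment above, namely that having
// inputs and being in the middle of a merge are the same state, because the
// constructor launches the merge immediately. All names here (ToyFutureMerge,
// mInputs, mMerge) are invented for illustration; only the standard library is
// assumed.
//
// #include <cassert>
// #include <future>
// #include <vector>
//
// struct ToyFutureMerge
// {
//     std::vector<int> mInputs;
//     std::future<int> mMerge;
//
//     explicit ToyFutureMerge(std::vector<int> inputs) : mInputs(std::move(inputs))
//     {
//         assert(!mInputs.empty());
//         // Having inputs *is* the live-merging state: start the work right here,
//         // so there is never a have-inputs-but-not-merging object.
//         mMerge = std::async(std::launch::async, [in = mInputs] {
//             int sum = 0;
//             for (int v : in)
//                 sum += v;
//             return sum;
//         });
//     }
//
//     bool isLive() const { return mMerge.valid(); }
// };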
// static
void GradientsMergeMosaic::mergeMosaic(imageListType_t const& rImageList_p,
                                       realType_t dBlackPoint_p,
                                       pcl_enum eType_p,
                                       int32 shrinkCount_p,
                                       int32 featherRadius_p,
                                       imageType_t& rResultImage_p,
                                       sumMaskImageType_t& rSumMaskImage_p)
{
    Assert(rImageList_p.Length() >= 1);
    bool firstImage = true;
    int nCols = 0, nRows = 0, nChannels = 0; /// size and color space of first image
    imageType_t::color_space colorSpace;
    weightImageType_t countImageDx, countImageDy; /// number of pixels that contributed to sumImageDx,Dy in average mode
    imageType_t sumImageDx, sumImageDy; /// combined gradients in x and y direction. Note: these gradients live *between* the pixels
                                        /// of the original image, so the size is one less than the original image in the direction of the derivative
    int nImages = 0;                    /// number of images read
    const int enlargeSize = 1;          // number of pixels added at the border

    TimeMessage startMergeMosaic("Gradient Domain Merge Mosaic");
    TimeMessage startLoadImages("Loading images");
    for (std::size_t i = 0; i < rImageList_p.Length(); ++i)
    {
        imageType_t currentImage;
        int imageIndex = 0; // allow for multi-image files
        while (loadFile(rImageList_p[i], imageIndex, currentImage))
        {
            ++nImages;
            ++imageIndex;
            // expand image dimensions so we have a sufficient border for the morphological transform and convolution
            TimeMessage startEnlarge("creating border");
            currentImage.CropBy(enlargeSize, enlargeSize, enlargeSize, enlargeSize);
            startEnlarge.Stop();
            if (firstImage)
            {
                firstImage = false;
                // determine those parameters that must be shared by all images
                nCols = currentImage.Width();
                nRows = currentImage.Height();
                nChannels = currentImage.NumberOfChannels();
                colorSpace = currentImage.ColorSpace();
                // allocate necessary helper images
                rSumMaskImage_p.AllocateData(nCols, nRows);
                rSumMaskImage_p.ResetSelections();
                rSumMaskImage_p.Black();
                sumImageDx.AllocateData(nCols - 1, nRows, nChannels, colorSpace);
                sumImageDx.ResetSelections();
                sumImageDx.Black();
                sumImageDy.AllocateData(nCols, nRows - 1, nChannels, colorSpace);
                sumImageDy.ResetSelections();
                sumImageDy.Black();
                countImageDx.AllocateData(nCols - 1, nRows);
                countImageDx.Black();
                countImageDy.AllocateData(nCols, nRows - 1);
                countImageDy.Black();
            }
            else
            {
                // FIXME I wonder if I should check color space etc. as well...
                // check if properties of this image are identical to those of the first image
                if (nCols != currentImage.Width())
                {
                    throw Error("Current image width differs from first image width.");
                }
                else if (nRows != currentImage.Height())
                {
                    throw Error("Current image height differs from first image height.");
                }
                else if (nChannels != currentImage.NumberOfChannels())
                {
                    throw Error("Current image number of channels differs from first image number of channels.");
                }
            }
            TimeMessage startProcessImage("Processing Image " + String(nImages));
            mergeMosaicProcessImage(currentImage, dBlackPoint_p, eType_p, shrinkCount_p, featherRadius_p,
                                    sumImageDx, sumImageDy, rSumMaskImage_p, countImageDx, countImageDy);
        }
    }
    startLoadImages.Stop();

    if (eType_p == GradientsMergeMosaicType::Average)
    {
        TimeMessage startAverage("Averaging images");
        averageImage(eType_p, countImageDx, sumImageDx);
        averageImage(eType_p, countImageDy, sumImageDy);
        // we do not need the count images any longer
        countImageDx.AllocateData(0, 0);
        countImageDy.AllocateData(0, 0);
    }
    // at this point:
    // sumImageDx: average or overlay of the x-direction gradients of the images read
    // sumImageDy: average or overlay of the y-direction gradients of the images read
    // rSumMaskImage_p: mask with different values for the different source images. 0 is background.
    // We use this later to inform the user, but it is not needed in the following process.

    TimeMessage startMerge("Merging Images");
    imageType_t laplaceImage;
    TimeMessage startLaplace("Creating Laplace image");
    createLaplaceVonNeumannImage(sumImageDx, sumImageDy, laplaceImage);
    startLaplace.Stop();
    TimeMessage startSolve("Solving Laplace");
    solveImage(laplaceImage, rResultImage_p);
    startSolve.Stop();
    startMerge.Stop();
    rResultImage_p.ResetSelections();
#if 0
    // for debugging laplaceImage
    // rResultImage_p.Assign(laplaceImage);
    rResultImage_p.Assign(sumImageDx);
#else
    TimeMessage startShrink("shrinking border");
    rResultImage_p.CropBy(-enlargeSize, -enlargeSize, -enlargeSize, -enlargeSize);
    rSumMaskImage_p.CropBy(-enlargeSize, -enlargeSize, -enlargeSize, -enlargeSize);
    startShrink.Stop();
#endif
    TimeMessage startRescale("Rescaling Result");
    rResultImage_p.Rescale(); // FIXME something more clever?
    startRescale.Stop();
}
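// Illustrative sketch only (not PCL code): the mathematical core of the
// createLaplaceVonNeumannImage/solveImage step above is to reconstruct an image
// whose discrete gradients best match the merged gradient fields, i.e. to solve
// the Poisson equation with Neumann boundary conditions. The real code builds a
// separate Laplace image and uses its own solver; the sketch below folds the
// divergence computation into a plain Jacobi iteration on std::vector<float>,
// purely to show the technique. It assumes width, height >= 2; the result is
// defined only up to an additive constant, which is why mergeMosaic() ends with
// Rescale().

#include <cstddef>
#include <vector>

// Reconstruct an image u (width*height) whose discrete gradients best match
// gx (size (width-1)*height) and gy (size width*(height-1)).
std::vector<float>
solvePoissonNeumann(std::vector<float> const& gx, std::vector<float> const& gy,
                    std::size_t width, std::size_t height, int iterations = 2000)
{
    std::vector<float> u(width * height, 0.0f), next(width * height, 0.0f);
    auto GX = [&](std::size_t x, std::size_t y) { return gx[y * (width - 1) + x]; };
    auto GY = [&](std::size_t x, std::size_t y) { return gy[y * width + x]; };

    for (int it = 0; it < iterations; ++it)
    {
        for (std::size_t y = 0; y < height; ++y)
            for (std::size_t x = 0; x < width; ++x)
            {
                float sum = 0.0f; // sum of existing neighbour values
                float div = 0.0f; // divergence of the target gradient field
                int n = 0;        // number of existing neighbours (Neumann boundary: missing edges are simply omitted)
                if (x + 1 < width)  { sum += u[y * width + x + 1];   div += GX(x, y);     ++n; }
                if (x > 0)          { sum += u[y * width + x - 1];   div -= GX(x - 1, y); ++n; }
                if (y + 1 < height) { sum += u[(y + 1) * width + x]; div += GY(x, y);     ++n; }
                if (y > 0)          { sum += u[(y - 1) * width + x]; div -= GY(x, y - 1); ++n; }
                // Jacobi update for the discrete Poisson equation: n*u - sum(neighbours) = -div
                next[y * width + x] = (sum - div) / n;
            }
        u.swap(next);
    }
    return u; // caller rescales to a display range, as mergeMosaic() does with Rescale()
}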