Example #1
// Greedily groups the given images into HDR stacks: each stack is seeded with the
// first remaining image and collects every other image whose overlap with the seed
// exceeds opts.outputStacksMinOverlap.
vector<UIntSet> getHDRStacks(const PanoramaData & pano, UIntSet allImgs, PanoramaOptions opts)
{
    vector<UIntSet> result;

    // if no images are available, return empty result vector
    if ( allImgs.empty() )
    {
        return result;
    }

    UIntSet stack;

    CalculateImageOverlap overlap(&pano);
    overlap.calculate(10);  // we are testing 10*10=100 points
    do {
        unsigned srcImg = *(allImgs.begin());
        stack.insert(srcImg);
        allImgs.erase(srcImg);

        // find all images that have a suitable overlap.
        for (UIntSet::iterator it = allImgs.begin(); it !=  allImgs.end(); ) {
            unsigned srcImg2 = *it;
            ++it;
            if(overlap.getOverlap(srcImg,srcImg2)>opts.outputStacksMinOverlap)
            {
                stack.insert(srcImg2);
                allImgs.erase(srcImg2);
            }
        }
        result.push_back(stack);
        stack.clear();
    } while (allImgs.size() > 0);

    return result;
}
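Both this function and getExposureLayers below rely on the same idiom: pick a seed element from the remaining set, sweep the rest of the set while advancing the iterator before any erase, and repeat until the set is empty. The following is a minimal, self-contained sketch of that pattern, assuming UIntSet is a typedef for std::set<unsigned int>; relatedTo is a hypothetical stand-in for the overlap or exposure test and is not part of either code base.

#include <set>
#include <vector>

typedef std::set<unsigned int> UIntSet;   // assumed definition of UIntSet

// Toy stand-in for the real test, e.g. overlap.getOverlap(a, b) > threshold.
bool relatedTo(unsigned int a, unsigned int b)
{
    return (a % 3) == (b % 3);
}

std::vector<UIntSet> groupGreedily(UIntSet remaining)
{
    std::vector<UIntSet> groups;
    while (!remaining.empty())
    {
        // seed a new group with the smallest remaining element
        unsigned int seed = *remaining.begin();
        UIntSet group;
        group.insert(seed);
        remaining.erase(seed);

        // sweep the rest; advance the iterator before a possible erase,
        // so removing the current element cannot invalidate the loop
        for (UIntSet::iterator it = remaining.begin(); it != remaining.end(); )
        {
            unsigned int candidate = *it;
            ++it;
            if (relatedTo(seed, candidate))
            {
                group.insert(candidate);
                remaining.erase(candidate);
            }
        }
        groups.push_back(group);
    }
    return groups;
}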
// Recursively collects the glyph IDs referenced by a composite glyph into ioComponents.
// Returns true if the glyph at inGlyphID is composite, false otherwise (or on an
// out-of-range glyph index).
bool TrueTypeEmbeddedFontWriter::AddComponentGlyphs(unsigned int inGlyphID,UIntSet& ioComponents)
{
	GlyphEntry* glyfTableEntry;
	UIntList::iterator itComponentGlyphs;
	bool isComposite = false;

	if(inGlyphID >= mTrueTypeInput.mMaxp.NumGlyphs)
	{
		TRACE_LOG2("TrueTypeEmbeddedFontWriter::AddComponentGlyphs, error, requested glyph index %ld is larger than the maximum glyph index for this font which is %ld. ",inGlyphID,mTrueTypeInput.mMaxp.NumGlyphs-1);
		return false;
	}

	glyfTableEntry = mTrueTypeInput.mGlyf[inGlyphID];
	if(glyfTableEntry != NULL && glyfTableEntry->mComponentGlyphs.size() > 0)
	{
		isComposite = true;
		for(itComponentGlyphs = glyfTableEntry->mComponentGlyphs.begin(); 
				itComponentGlyphs != glyfTableEntry->mComponentGlyphs.end(); 
				++itComponentGlyphs)
		{
				ioComponents.insert(*itComponentGlyphs);
				AddComponentGlyphs(*itComponentGlyphs,ioComponents);
		}
	}
	return isComposite;
}
// Extends a glyph ID subset with all component glyphs referenced by composite glyphs,
// returning the IDs as a sorted, duplicate-free vector.
EStatusCode CFFEmbeddedFontWriter::AddDependentGlyphs(UIntVector& ioSubsetGlyphIDs)
{
	EStatusCode status = PDFHummus::eSuccess;
	UIntSet glyphsSet;
	UIntVector::iterator it = ioSubsetGlyphIDs.begin();
	bool hasCompositeGlyphs = false;

	for(;it != ioSubsetGlyphIDs.end() && PDFHummus::eSuccess == status; ++it)
	{
		bool localHasCompositeGlyphs;
		status = AddComponentGlyphs(*it,glyphsSet,localHasCompositeGlyphs);
		hasCompositeGlyphs |= localHasCompositeGlyphs;
	}

	if(hasCompositeGlyphs)
	{
		UIntSet::iterator itNewGlyphs;

		for(it = ioSubsetGlyphIDs.begin();it != ioSubsetGlyphIDs.end(); ++it)
			glyphsSet.insert(*it);

		ioSubsetGlyphIDs.clear();
		for(itNewGlyphs = glyphsSet.begin(); itNewGlyphs != glyphsSet.end(); ++itNewGlyphs)
			ioSubsetGlyphIDs.push_back(*itNewGlyphs);
		
		sort(ioSubsetGlyphIDs.begin(),ioSubsetGlyphIDs.end());
	}	
	return status;
}
void OptimizePhotometricPanel::OnOptimizeButton(wxCommandEvent & e)
{
    DEBUG_TRACE("");
    // run optimizer
    // take the OptimizeVector from somewhere.

    //OptimizeVector optvars = getOptimizeVector();
    //m_pano->setOptimizeVector(optvars);


    UIntSet imgs;
    if (m_only_active_images_cb->IsChecked()) {
        // use only selected images.
        imgs = m_pano->getActiveImages();
        if (imgs.size() == 0) {
            //FIXME: Pop-up a dialog stating no images have been selected for optimization.
            return;
        } 
    } else {
        for (unsigned int i = 0 ; i < m_pano->getNrOfImages(); i++) {
                imgs.insert(i);
        }
    }
    runOptimizer(imgs);
}
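The else branch above simply inserts every image index into the set. A later example uses a fill_set helper for the same purpose (fill_set(allImgs, 0, pano.getNrOfImages()-1)); a minimal sketch of such a helper, assuming an inclusive range, could look like this. This is an illustrative stand-in, not the Hugin implementation.

#include <set>

typedef std::set<unsigned int> UIntSet;   // assumed definition of UIntSet

// Sketch of a fill_set-style helper: insert every value in the
// inclusive range [first, last] into the set (assumed semantics).
inline void fill_set(UIntSet & s, unsigned int first, unsigned int last)
{
    for (unsigned int i = first; i <= last; ++i)
    {
        s.insert(i);
    }
}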
    PTOptEstimator(PanoramaData & pano, int i1, int i2, double maxError,
		   bool optHFOV, bool optB)
    {
	m_maxError = maxError;

	UIntSet imgs;
	imgs.insert(i1);
	imgs.insert(i2);
	m_localPano = (pano.getNewSubset(imgs)); // don't forget to delete
	m_li1 = (i1 < i2) ? 0 : 1;
	m_li2 = (i1 < i2) ? 1 : 0;
	// get control points
	m_cps  = m_localPano->getCtrlPoints();
	// only use 2D control points
	BOOST_FOREACH(ControlPoint & kp, m_cps) {
	    if (kp.mode == ControlPoint::X_Y) {
		m_xy_cps.push_back(kp);
	    }
	}
	
	if (optHFOV)
	    m_optvars.push_back(OptVarSpec(0,std::string("v")));
	if (optB)
	    m_optvars.push_back(OptVarSpec(0,std::string("b")));
	m_optvars.push_back(OptVarSpec(m_li2,"r"));
	m_optvars.push_back(OptVarSpec(m_li2,"p"));
	m_optvars.push_back(OptVarSpec(m_li2,"y"));
	if (optHFOV)
	    m_optvars.push_back(OptVarSpec(0,"v"));
	if (optB)
	    m_optvars.push_back(OptVarSpec(0,"b"));

	/** optimisation for first pass */
	m_opt_first_pass.resize(2);
	m_opt_first_pass[1].insert("r");
	m_opt_first_pass[1].insert("p");
	m_opt_first_pass[1].insert("y");

	/** optimisation for second pass */
	if (optHFOV || optB) {
	    m_opt_second_pass = m_opt_first_pass;
	    if (optHFOV)
		m_opt_second_pass[0].insert("v");
	    if (optB)
		m_opt_second_pass[0].insert("b");
	}

	// number of points required for estimation
	m_numForEstimate = (m_optvars.size()+1)/2;	
			    
	// extract initial parameters from pano
	m_initParams.resize(m_optvars.size());
	int i=0;
	BOOST_FOREACH(OptVarSpec & v, m_optvars) {
	    m_initParams[i] = v.get(*m_localPano);
	    DEBUG_DEBUG("get init var: " << v.m_name << ", " << v.m_img << ": " << m_initParams[i]);
	    i++;
	}
    }
Example #6
// Returns the subset of activeImages whose estimated output region of interest is not empty.
UIntSet getImagesinROI (const PanoramaData& pano, const UIntSet activeImages)
{
    UIntSet images;
    PanoramaOptions opts = pano.getOptions();
    for (UIntSet::const_iterator it = activeImages.begin(); it != activeImages.end(); ++it)
    {
        vigra::Rect2D roi = estimateOutputROI(pano, opts, *it);
        if (! (roi.isEmpty())) {
            images.insert(*it);
        }
    }
    return images;
}
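The loop above is a plain filter: keep every image whose estimated output ROI is non-empty. With C++11 the same shape can be written generically with std::copy_if and an inserter; the following is only an illustrative sketch, not code from Hugin.

#include <algorithm>
#include <iterator>
#include <set>

typedef std::set<unsigned int> UIntSet;   // assumed definition of UIntSet

// Keep only the elements of 'in' for which 'pred' returns true.
template <class Predicate>
UIntSet filterSet(const UIntSet & in, Predicate pred)
{
    UIntSet out;
    std::copy_if(in.begin(), in.end(),
                 std::inserter(out, out.end()), pred);
    return out;
}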
void AssistantPanel::OnCropFactorChanged(wxCommandEvent & e)
{
    wxString text = m_cropFactorText->GetValue();
    DEBUG_INFO("crop factor: " << text.mb_str(wxConvLocal));
    double val;
    if (!str2double(text, val)) {
        return;
    }

    UIntSet images;
    images.insert(0);
    GlobalCmdHist::getInstance().addCommand(
        new PT::UpdateCropFactorCmd(*m_pano,images,val)
    );
}
Example #8
// Greedily groups the given images into exposure layers: each layer is seeded with the
// first remaining image and collects every other image whose exposure value differs
// from the seed by less than opts.outputLayersExposureDiff.
vector<UIntSet> getExposureLayers(const PanoramaData & pano, UIntSet allImgs, PanoramaOptions opts)
{
    vector<UIntSet> result;

    // if no images are available, return empty result vector
    if ( allImgs.empty() )
    {
        return result;
    }

    UIntSet stack;

    do {
        unsigned srcImg = *(allImgs.begin());
        stack.insert(srcImg);
        allImgs.erase(srcImg);

        // find all images that have a similar exposure value.
        SrcPanoImage simg = pano.getSrcImage(srcImg);
        double maxEVDiff = opts.outputLayersExposureDiff;
        for (UIntSet::iterator it = allImgs.begin(); it !=  allImgs.end(); ) {
            unsigned srcImg2 = *it;
            ++it;
            SrcPanoImage simg2 = pano.getSrcImage(srcImg2);
            if ( fabs(simg.getExposureValue() - simg2.getExposureValue()) < maxEVDiff )
            {
                stack.insert(srcImg2);
                allImgs.erase(srcImg2);
            }
        }
        result.push_back(stack);
        stack.clear();
    } while (allImgs.size() > 0);

    return result;
}
void AssistantPanel::OnLoadLens(wxCommandEvent & e)
{
    unsigned int imgNr = 0;
    unsigned int lensNr = m_variable_groups->getLenses().getPartNumber(imgNr);
    // get all images with the current lens.
    UIntSet imgs;
    for (unsigned int i = 0; i < m_pano->getNrOfImages(); i++)
    {
        if (m_variable_groups->getLenses().getPartNumber(i) == lensNr)
        {
            imgs.insert(i);
        };
    }
    PT::PanoCommand* cmd=NULL;
    if(ApplyLensParameters(this,m_pano,imgs,cmd))
    {
        GlobalCmdHist::getInstance().addCommand(cmd);
    };
}
// CFF variant: recursively collects the charstring dependencies of a glyph into ioComponents;
// outFoundComponents reports whether any components were found.
EStatusCode CFFEmbeddedFontWriter::AddComponentGlyphs(unsigned int inGlyphID,UIntSet& ioComponents,bool &outFoundComponents)
{
	CharString2Dependencies dependencies;
	EStatusCode status = mOpenTypeInput.mCFF.CalculateDependenciesForCharIndex(0,inGlyphID,dependencies);

	if(PDFHummus::eSuccess == status && dependencies.mCharCodes.size() !=0)
	{
		UShortSet::iterator it = dependencies.mCharCodes.begin();
		for(; it != dependencies.mCharCodes.end() && PDFHummus::eSuccess == status; ++it)
		{
			bool dummyFound;
			ioComponents.insert(*it);
			status = AddComponentGlyphs(*it,ioComponents,dummyFound);
		}
		outFoundComponents = true;
	}
	else
		outFoundComponents = false;
	return status;
}
// TrueType variant of AddDependentGlyphs: expands the glyph ID subset with all component
// glyphs of composite glyphs and returns it as a sorted, duplicate-free vector.
void TrueTypeEmbeddedFontWriter::AddDependentGlyphs(UIntVector& ioSubsetGlyphIDs)
{
	UIntSet glyphsSet;
	UIntVector::iterator it = ioSubsetGlyphIDs.begin();
	bool hasCompositeGlyphs = false;

	for(;it != ioSubsetGlyphIDs.end(); ++it)
		hasCompositeGlyphs |= AddComponentGlyphs(*it,glyphsSet);

	if(hasCompositeGlyphs)
	{
		UIntSet::iterator itNewGlyphs;

		for(it = ioSubsetGlyphIDs.begin();it != ioSubsetGlyphIDs.end(); ++it)
			glyphsSet.insert(*it);

		ioSubsetGlyphIDs.clear();
		for(itNewGlyphs = glyphsSet.begin(); itNewGlyphs != glyphsSet.end(); ++itNewGlyphs)
			ioSubsetGlyphIDs.push_back(*itNewGlyphs);
		
		sort(ioSubsetGlyphIDs.begin(),ioSubsetGlyphIDs.end());
	}
}
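Both AddDependentGlyphs implementations finish the same way: pour the original IDs and all discovered component IDs into a UIntSet to remove duplicates, then copy the set back into the vector. Because std::set iterates in ascending key order, the copied vector is already sorted, so the trailing sort call does no extra work. A condensed sketch of that step (assumed typedefs, not PDFHummus code):

#include <algorithm>
#include <set>
#include <vector>

typedef std::set<unsigned int> UIntSet;            // assumed definitions
typedef std::vector<unsigned int> UIntVector;

// Merge extra IDs into 'ids', removing duplicates and keeping the result sorted.
void mergeSorted(UIntVector & ids, const UIntSet & extra)
{
    UIntSet all(extra);
    all.insert(ids.begin(), ids.end());            // de-duplicate
    ids.assign(all.begin(), all.end());            // set iteration is ascending,
                                                   // so 'ids' is already sorted
}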
void AssistantPanel::OnLensTypeChanged (wxCommandEvent & e)
{
    // uses enum Lens::LensProjectionFormat from PanoramaMemento.h
    size_t var = GetSelectedProjection(m_lensTypeChoice);
    Lens lens = m_variable_groups->getLens(0);
    if (lens.getProjection() != (Lens::LensProjectionFormat) var) {
        //double crop = lens.getCropFactor();
        double fl = lens.getFocalLength();
        UIntSet imgs;
        imgs.insert(0);
        std::vector<PanoCommand*> commands;
        commands.push_back(
                new PT::ChangeImageProjectionCmd(
                                    *m_pano,
                                    imgs,
                                    (HuginBase::SrcPanoImage::Projection) var
                                )
            );
        
        commands.push_back(new PT::UpdateFocalLengthCmd(*m_pano, imgs, fl));
        GlobalCmdHist::getInstance().addCommand(
                new PT::CombinedPanoCommand(*m_pano, commands));
    }
}
int main(int argc, char* argv[])
{
    // parse arguments
    const char* optstring = "o:i:l:h";

    static struct option longOptions[] =
    {
        {"output", required_argument, NULL, 'o' },
        {"image", required_argument, NULL, 'i' },
        {"lines", required_argument, NULL, 'l' },
        {"help", no_argument, NULL, 'h' },
        0
    };

    UIntSet cmdlineImages;
    int c;
    int optionIndex = 0;
    int nrLines = 5;
    string output;
    while ((c = getopt_long (argc, argv, optstring, longOptions,&optionIndex)) != -1)
    {
        switch (c)
        {
            case 'o':
                output = optarg;
                break;
            case 'h':
                usage(argv[0]);
                return 0;
            case 'i':
                {
                    int imgNr=atoi(optarg);
                    if((imgNr==0) && (strcmp(optarg,"0")!=0))
                    {
                        cerr << "Could not parse image number.";
                        return 1;
                    };
                    cmdlineImages.insert(imgNr);
                };
                break;
            case 'l':
                nrLines=atoi(optarg);
                if(nrLines<1)
                {
                    cerr << "Could not parse number of lines.";
                    return 1;
                };
                break;
            case ':':
                cerr <<"Option " << longOptions[optionIndex].name << " requires a number" << endl;
                return 1;
                break;
            case '?':
                break;
            default:
                abort ();
        }
    }

    if (argc - optind != 1)
    {
        cout << "Warning: " << argv[0] << " can only work on one project file at one time" << endl << endl;
        usage(argv[0]);
        return 1;
    };

    string input=argv[optind];
    // read panorama
    Panorama pano;
    ifstream prjfile(input.c_str());
    if (!prjfile.good())
    {
        cerr << "could not open script : " << input << endl;
        return 1;
    }
    pano.setFilePrefix(hugin_utils::getPathPrefix(input));
    DocumentData::ReadWriteError err = pano.readData(prjfile);
    if (err != DocumentData::SUCCESSFUL)
    {
        cerr << "error while parsing panos tool script: " << input << endl;
        cerr << "DocumentData::ReadWriteError code: " << err << endl;
        return 1;
    }

    if(pano.getNrOfImages()==0)
    {
        cerr << "error: project file does not contains any image" << endl;
        cerr << "aborting processing" << endl;
        return 1;
    };

    std::vector<size_t> imagesToProcess;
    if(cmdlineImages.size()==0)
    {
        //no image given, process all
        for(size_t i=0;i<pano.getNrOfImages();i++)
        {
            imagesToProcess.push_back(i);
        };
    }
    else
    {
        //check, if given image numbers are valid
        for(UIntSet::const_iterator it=cmdlineImages.begin();it!=cmdlineImages.end();it++)
        {
            if((*it)>=0 && (*it)<pano.getNrOfImages())
            {
                imagesToProcess.push_back(*it);
            };
        };
    };

    if(imagesToProcess.size()==0)
    {
        cerr << "No image to process found" << endl << "Stopping processing" << endl;
        return 1;
    };

    PT_setProgressFcn(ptProgress);
    PT_setInfoDlgFcn(ptinfoDlg);

    cout << argv[0] << " is searching for vertical lines" << endl;
#if _WINDOWS
    //multi threading of image loading sometimes results in a race condition
    //try to prevent this by initialisation of codecManager before
    //running multi threading part
    std::string s=vigra::impexListExtensions();
#endif
#ifdef HAS_PPL
    size_t nrCPS=pano.getNrOfCtrlPoints();
    Concurrency::parallel_for<size_t>(0,imagesToProcess.size(),[&pano,imagesToProcess,nrLines](size_t i)
#else
    for(size_t i=0;i<imagesToProcess.size();i++)
#endif
    {
        unsigned int imgNr=imagesToProcess[i];
        cout << "Working on image " << pano.getImage(imgNr).getFilename() << endl;
        // now load and process all images
        vigra::ImageImportInfo info(pano.getImage(imgNr).getFilename().c_str());
        HuginBase::CPVector foundLines;
        if(info.isGrayscale())
        {
            foundLines=LoadGrayImageAndFindLines(info, pano, imgNr, nrLines);
        }
        else
        {
            if(info.isColor())
            {
                //colour images
                foundLines=LoadImageAndFindLines(info, pano, imgNr, nrLines);
            }
            else
            {
                std::cerr << "Image " << pano.getImage(imgNr).getFilename().c_str() << " has " 
                    << info.numBands() << " channels." << std::endl
                    << "Linefind works only with grayscale or color images." << std::endl
                    << "Skipping image." << std::endl;
            };
        };
#ifndef HAS_PPL
        cout << "Found " << foundLines.size() << " vertical lines" << endl;
#endif
        if(foundLines.size()>0)
        {
            for(CPVector::const_iterator cpIt=foundLines.begin(); cpIt!=foundLines.end(); cpIt++)
            {
                pano.addCtrlPoint(*cpIt);
            };
        };
    }
#ifdef HAS_PPL
    );
#endif
// Runs the control point detector on every pair of consecutive images, then connects any
// remaining image groups and, if requested by the setting, searches for further matches on
// overlapping image pairs after a rough pairwise optimisation.
CPVector AutoPanoSiftMultiRow::automatch(CPDetectorSetting &setting, Panorama & pano, const UIntSet & imgs,
                                     int nFeatures, int & ret_value, wxWindow *parent)
{
    CPVector cps;
    if (imgs.size() < 2) 
    {
        return cps;
    };
    std::vector<wxString> keyFiles(pano.getNrOfImages());
    //generate cp for every consecutive image pair
    unsigned int counter=0;
    for(UIntSet::const_iterator it = imgs.begin(); it != imgs.end(); )
    {
        if(counter==imgs.size()-1)
            break;
        counter++;
        UIntSet ImagePair;
        ImagePair.clear();
        ImagePair.insert(*it);
        it++;
        ImagePair.insert(*it);
        AutoPanoSift matcher;
        CPVector new_cps;
        new_cps.clear();
        if(setting.IsTwoStepDetector())
            new_cps=matcher.automatch(setting, pano, ImagePair, nFeatures, keyFiles, ret_value, parent);
        else
            new_cps=matcher.automatch(setting, pano, ImagePair, nFeatures, ret_value, parent);
        if(new_cps.size()>0)
            AddControlPointsWithCheck(cps,new_cps);
        if(ret_value!=0)
        {
            Cleanup(setting, pano, imgs, keyFiles, parent);
            return cps;
        };
    };
    // now connect all image groups
    // generate temporary panorama to add all found cps
    UIntSet allImgs;
    fill_set(allImgs, 0, pano.getNrOfImages()-1);
    Panorama optPano=pano.getSubset(allImgs);
    for (CPVector::const_iterator it=cps.begin();it!=cps.end();++it)
        optPano.addCtrlPoint(*it);

    CPGraph graph;
    createCPGraph(optPano, graph);
    CPComponents comps;
    int n = findCPComponents(graph, comps);
    if(n>1)
    {
        UIntSet ImagesGroups;
        for(unsigned int i=0;i<n;i++)
        {
            ImagesGroups.insert(*(comps[i].begin()));
            if(comps[i].size()>1)
                ImagesGroups.insert(*(comps[i].rbegin()));
        };
        AutoPanoSift matcher;
        CPVector new_cps;
        if(setting.IsTwoStepDetector())
            new_cps=matcher.automatch(setting, optPano, ImagesGroups, nFeatures, keyFiles, ret_value, parent);
        else
            new_cps=matcher.automatch(setting, optPano, ImagesGroups, nFeatures, ret_value, parent);
        if(new_cps.size()>0)
            AddControlPointsWithCheck(cps,new_cps,&optPano);
        if(ret_value!=0)
        {
            Cleanup(setting, pano, imgs, keyFiles, parent);
            return cps;
        };
        createCPGraph(optPano,graph);
        n=findCPComponents(graph, comps);
    };
    if(n==1 && setting.GetOption())
    {
        //the next steps happen only when all images are connected;
        //now optimize panorama
        PanoramaOptions opts = pano.getOptions();
        opts.setProjection(PanoramaOptions::EQUIRECTANGULAR);
        // calculate proper scaling, 1:1 resolution.
        // Otherwise optimizer distances are meaningless.
        opts.setWidth(30000, false);
        opts.setHeight(15000);

        optPano.setOptions(opts);
        int w = optPano.calcOptimalWidth();
        opts.setWidth(w);
        opts.setHeight(w/2);
        optPano.setOptions(opts);

        //generate optimize vector, optimize only yaw and pitch
        OptimizeVector optvars;
        const SrcPanoImage & anchorImage = optPano.getImage(opts.optimizeReferenceImage);
        for (unsigned i=0; i < optPano.getNrOfImages(); i++) 
        {
            std::set<std::string> imgopt;
            if(i==opts.optimizeReferenceImage)
            {
                //optimize only the anchor's pitch, not its yaw
                imgopt.insert("p");
            }
            else
            {
                // do not optimize anchor image's stack for position.
                if(!optPano.getImage(i).YawisLinkedWith(anchorImage))
                {
                    imgopt.insert("p");
                    imgopt.insert("y");
                };
            };
            optvars.push_back(imgopt);
        }
        optPano.setOptimizeVector(optvars);

        // remove vertical and horizontal control points
        CPVector backupOldCPS = optPano.getCtrlPoints();
        CPVector backupNewCPS;
        for (CPVector::const_iterator it = backupOldCPS.begin(); it != backupOldCPS.end(); it++) {
            if (it->mode == ControlPoint::X_Y)
            {
                backupNewCPS.push_back(*it);
            }
        }
        optPano.setCtrlPoints(backupNewCPS);
        // do a first pairwise optimisation step
        HuginBase::AutoOptimise::autoOptimise(optPano,false);
        HuginBase::PTools::optimize(optPano);
        optPano.setCtrlPoints(backupOldCPS);
        //and find cp on overlapping images
        //work only on image pairs, which are not yet connected
        AutoPanoSiftPreAlign matcher;
        CPDetectorSetting newSetting;
        newSetting.SetProg(setting.GetProg());
        newSetting.SetArgs(setting.GetArgs());
        if(setting.IsTwoStepDetector())
        {
            newSetting.SetProgMatcher(setting.GetProgMatcher());
            newSetting.SetArgsMatcher(setting.GetArgsMatcher());
        };
        newSetting.SetOption(true);
        CPVector new_cps;
        if(setting.IsTwoStepDetector())
            new_cps=matcher.automatch(newSetting, optPano, imgs, nFeatures, keyFiles, ret_value, parent);
        else
            new_cps=matcher.automatch(newSetting, optPano, imgs, nFeatures, ret_value, parent);
        if(new_cps.size()>0)
            AddControlPointsWithCheck(cps,new_cps);
    };
    Cleanup(setting, pano, imgs, keyFiles, parent);
    return cps;
};
// Runs the control point detector on groups of overlapping images; when the detector
// setting requests it, image pairs that are already connected by control points are skipped.
CPVector AutoPanoSiftPreAlign::automatch(CPDetectorSetting &setting, Panorama & pano, const UIntSet & imgs,
                                         int nFeatures, std::vector<wxString> &keyFiles, int & ret_value, wxWindow *parent)
{
    CPVector cps;
    if (imgs.size()<2) 
        return cps;
    DEBUG_ASSERT(keyFiles.size()==pano.getNrOfImages());

    vector<UIntSet> usedImages;
    usedImages.resize(pano.getNrOfImages());
    if(setting.GetOption())
    {
        //only work on not connected image pairs
        CPVector oldCps=pano.getCtrlPoints();
        for(unsigned i=0;i<oldCps.size();i++)
        {
            if(oldCps[i].mode==ControlPoint::X_Y)
            {
                usedImages[oldCps[i].image1Nr].insert(oldCps[i].image2Nr);
                usedImages[oldCps[i].image2Nr].insert(oldCps[i].image1Nr);
            };
        };
    };
    HuginBase::CalculateImageOverlap overlap(&pano);
    overlap.calculate(10);
    for(UIntSet::const_iterator it=imgs.begin();it!=imgs.end();it++)
    {
        UIntSet images;
        images.clear();
        images.insert(*it);
        UIntSet::const_iterator it2=it;
        for(++it2;it2!=imgs.end();it2++)
        {
            //check if this image pair was already used
            if(set_contains(usedImages[*it2],*it))
                continue;
            //now check position
            if(overlap.getOverlap(*it,*it2)>0)
            {
                images.insert(*it2);
            };
        };
        if(images.size()<2)
            continue;
        //remember image pairs for later
        for(UIntSet::const_iterator img_it=images.begin();img_it!=images.end();img_it++)
            for(UIntSet::const_iterator img_it2=images.begin();img_it2!=images.end();img_it2++)
                usedImages[*img_it].insert(*img_it2);
        AutoPanoSift matcher;
        CPVector new_cps;
        if(setting.IsTwoStepDetector())
            new_cps=matcher.automatch(setting, pano, images, nFeatures, keyFiles, ret_value, parent);
        else
            new_cps=matcher.automatch(setting, pano, images, nFeatures, ret_value, parent);
        if(new_cps.size()>0)
            AddControlPointsWithCheck(cps,new_cps);
        if(ret_value!=0)
        {
            Cleanup(setting, pano, imgs, keyFiles, parent);
            return cps;
        };
    };
    Cleanup(setting, pano, imgs, keyFiles, parent);
    return cps;
};
// Centers the panorama horizontally: remaps the selected images' masks into a small
// 360x180 equirectangular canvas, finds the horizontal extent of the covered area and
// shifts the panorama's yaw so that this area is centered.
void CenterHorizontally::centerHorizontically(PanoramaData& panorama)
{
    vigra::Size2D panoSize(360,180);
    
    // remap into miniature pano.
    PanoramaOptions opts;
    opts.setHFOV(360);
    opts.setProjection(PanoramaOptions::EQUIRECTANGULAR);
    opts.setWidth(360);
    opts.setHeight(180);
    
    // remap image
    vigra::BImage panoAlpha(panoSize);
    Nona::RemappedPanoImage<vigra::BImage, vigra::BImage> remapped;
    
    // use selected images.
    const UIntSet allActiveImgs(panorama.getActiveImages());

    if (allActiveImgs.empty())
    {
        // do nothing if there are no images
        return;
    }
    
    //only check unlinked images
    UIntSet activeImgs;
    for (UIntSet::const_iterator it = allActiveImgs.begin(); it!= allActiveImgs.end(); ++it)
    {
        const SrcPanoImage & img=panorama.getImage(*it);
        bool consider=true;
        if(img.YawisLinked())
        {
            for(UIntSet::const_iterator it2=activeImgs.begin(); it2!=activeImgs.end(); ++it2)
            {
                if(img.YawisLinkedWith(panorama.getSrcImage(*it2)))
                {
                    consider=false;
                    break;
                };
            };
        };
        if(consider)
            activeImgs.insert(*it);
    };

    for (UIntSet::iterator it = activeImgs.begin(); it != activeImgs.end(); ++it)
    {
        remapped.setPanoImage(panorama.getSrcImage(*it), opts, vigra::Rect2D(0,0,360,180));
        // calculate alpha channel
        remapped.calcAlpha();
        // copy into global alpha channel.
        vigra::copyImageIf(vigra_ext::applyRect(remapped.boundingBox(),
                                                vigra_ext::srcMaskRange(remapped)),
                           vigra_ext::applyRect(remapped.boundingBox(),
                                                vigra_ext::srcMask(remapped)),
                           vigra_ext::applyRect(remapped.boundingBox(),
                                                destImage(panoAlpha)));
    }
    
    // get field of view
    std::vector<int> borders;
    bool colOccupied = false;
    for (int h=0; h < 360; h++) {
        bool curColOccupied = false;
        for (int v=0; v< 180; v++) {
            if (panoAlpha(h,v)) {
                // pixel is valid
                curColOccupied = true;
            }
        }
        if ((colOccupied && !curColOccupied) ||
            (!colOccupied && curColOccupied))
        {
            // change in position, save point.
            borders.push_back(h-180);
            colOccupied = curColOccupied;
        }
    }
    
    
    int lastidx = borders.size() -1;
    if (lastidx == -1) {
        // empty pano
        return;
    }
    
    if (colOccupied) {
        // we have reached the right border, and the pano is still valid
        // shift right fragments by 360 deg
        // |11    2222|  -> |      222211     |
        std::vector<int> newBorders;
        newBorders.push_back(borders[lastidx]);
        for (int i = 0; i < lastidx; i++) {
            newBorders.push_back(borders[i]+360);
        }
        borders = newBorders;
    }
    
    const double dYaw=(borders[0] + borders[lastidx])/2;
    
    // apply yaw shift, takes also translation parameters into account
    RotatePanorama(panorama, -dYaw, 0, 0).run();
}
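The border detection above reduces the remapped alpha image to one occupancy flag per column and records every column where that flag flips, expressed as yaw relative to the image centre. A self-contained sketch of just that step (hypothetical helper, not Hugin code, assuming a 360-column canvas as in the example):

#include <vector>

// Given one occupancy flag per column of a 360-column equirectangular
// alpha image, return the yaw positions (in [-180, 180)) at which the
// coverage changes between empty and covered.
std::vector<int> findYawBorders(const std::vector<bool> & columnOccupied)
{
    std::vector<int> borders;
    bool occupied = false;
    for (int h = 0; h < static_cast<int>(columnOccupied.size()); ++h)
    {
        if (columnOccupied[h] != occupied)
        {
            borders.push_back(h - 180);
            occupied = columnOccupied[h];
        }
    }
    return borders;
}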