Code example #1
File: main.cpp Project: DerThorsten/exercise
int main() {
  vigra::ImageImportInfo info("herc.jpg");
  if (info.isGrayscale()) {
    // grayscale input: this branch only loads the image; no inversion is done here
    vigra::BImage in(info.width(), info.height());
    vigra::importImage(info, destImage(in));
  }
  else {
    vigra::BRGBImage in(info.width(), info.height());
    vigra::BRGBImage out(info.width(), info.height());
    vigra::RGBValue<int> offset(255, 255, 255);
    vigra::importImage(info, destImage(in));
    vigra::BRGBImage::Iterator dy = in.upperLeft();
    vigra::BRGBImage::Iterator end = in.lowerRight();
    vigra::BRGBImage::Iterator oy = out.upperLeft();
    for (; dy.y != end.y; ++dy.y, ++oy.y) {   // iterate over all rows
      vigra::BRGBImage::Iterator dx = dy;
      vigra::BRGBImage::Iterator ox = oy;
      for (; dx.x != end.x; ++dx.x, ++ox.x) {
	vigra::RGBValue<int> pix = *dx;
	*ox = offset - pix;
      }
    }
    vigra::exportImage(vigra::srcImageRange(out), vigra::ImageExportInfo("hercinv.jpg"));
  }
  return 0;
}
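For comparison, the color-image branch's inversion loop can also be written without explicit iterators by handing a pixel functor to vigra::transformImage. A minimal sketch (the InvertRGB functor is illustrative, not part of the project above):

// Sketch: pixel-wise RGB inversion via vigra::transformImage (illustrative only).
#include <vigra/stdimage.hxx>
#include <vigra/impex.hxx>
#include <vigra/transformimage.hxx>

struct InvertRGB {
  vigra::RGBValue<vigra::UInt8> operator()(vigra::RGBValue<vigra::UInt8> const & p) const {
    return vigra::RGBValue<vigra::UInt8>(255 - p.red(), 255 - p.green(), 255 - p.blue());
  }
};

int main() {
  vigra::ImageImportInfo info("herc.jpg");
  vigra::BRGBImage in(info.width(), info.height());
  vigra::BRGBImage out(info.width(), info.height());
  vigra::importImage(info, vigra::destImage(in));
  vigra::transformImage(vigra::srcImageRange(in), vigra::destImage(out), InvertRGB());
  vigra::exportImage(vigra::srcImageRange(out), vigra::ImageExportInfo("hercinv.jpg"));
  return 0;
}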
Code example #2
//---------------------------------------------------------
bool CViGrA_Morphology::On_Execute(void)
{
	bool		bRescale;
	int			Type, Radius;
	double		Rank;
	CSG_Grid	*pInput, *pOutput, Rescaled;

	pInput		= Parameters("INPUT")	->asGrid();
	pOutput		= Parameters("OUTPUT")	->asGrid();
	Type		= Parameters("TYPE")	->asInt();
	Radius		= Parameters("RADIUS")	->asInt();
	Rank		= Parameters("RANK")	->asDouble();
	bRescale	= Parameters("RESCALE")	->asBool();

	//-----------------------------------------------------
	if( bRescale )
	{
		Rescaled.Create(*Get_System(), SG_DATATYPE_Byte);

		for(sLong i=0; i<Get_NCells() && Set_Progress_NCells(i); i++)
		{
			Rescaled.Set_Value(i, 0.5 + (pInput->asDouble(i) - pInput->Get_ZMin()) * 255.0 / pInput->Get_ZRange());
		}

		pInput	= &Rescaled;
	}

	//-----------------------------------------------------
	vigra::BImage	Input, Output(Get_NX(), Get_NY());

	Copy_Grid_SAGA_to_VIGRA(*pInput, Input, true);

	switch( Type )
	{
	case 0:	// Dilation
		discDilation		(srcImageRange(Input), destImage(Output), Radius);
		break;

	case 1:	// Erosion
		discErosion			(srcImageRange(Input), destImage(Output), Radius);
		break;

	case 2:	// Median
		discMedian			(srcImageRange(Input), destImage(Output), Radius);
		break;

	case 3:	// User defined rank
		discRankOrderFilter	(srcImageRange(Input), destImage(Output), Radius, Rank);
		break;
	}

	//-----------------------------------------------------
	Copy_Grid_VIGRA_to_SAGA(*pOutput, Output, false);

	pOutput->Set_Name(CSG_String::Format(SG_T("%s [%s]"), pInput->Get_Name(), Get_Name().c_str()));

	return( true );
}
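The switch above dispatches to VIGRA's flat (disc-shaped) morphology functions from <vigra/flatmorphology.hxx>. For reference, a minimal standalone sketch of the underlying call outside the SAGA wrapper, assuming a grayscale input and placeholder file names:

// Sketch: direct use of vigra::discDilation on an 8-bit grayscale image (illustrative only).
#include <vigra/stdimage.hxx>
#include <vigra/impex.hxx>
#include <vigra/flatmorphology.hxx>

int main() {
  vigra::ImageImportInfo info("input.png");   // placeholder grayscale input
  vigra::BImage in(info.width(), info.height());
  vigra::BImage out(info.width(), info.height());
  vigra::importImage(info, vigra::destImage(in));
  vigra::discDilation(vigra::srcImageRange(in), vigra::destImage(out), 3);   // radius 3
  vigra::exportImage(vigra::srcImageRange(out), vigra::ImageExportInfo("dilated.png"));
  return 0;
}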
Code example #3
int main(int argc, char * const *argv)
{
  int M = 256;
  cv::Size imsize(512, 512);
  cv::Size psize(32,32);

  if (argc < 3)
  {
    std::cerr << "Error. Usage: " << argv[0] << " <freak|brief> <output-image>\n";
    return -1;
  }

  int lbdType;
  if (strncmp(argv[1], "freak", 5) == 0)
  {
    lbdType = lbd::eTypeFreak;
  }
  else
  {
    if (strncmp(argv[1], "brief", 5) == 0)
    {
      lbdType = lbd::eTypeBrief;
    }
    else
    {
      std::cerr << "Error. The parameter should be either freak or brief.\n";
      return -1;
    }
  }

  lts2::LBDOperator *LBD = lts2::CreateLbdOperator(lbdType, M);
  LBD->initWithPatchSize(psize);

  cv::Mat destImage(imsize, CV_8UC3);
  LBD->drawSelfAsCircles(destImage);
  
  cv::imwrite(argv[2], destImage);

  delete LBD;   // release the operator created by CreateLbdOperator
  return EXIT_SUCCESS;
}
Code example #4
void EdgeHistogram::InitializeImage(const vigra::FVector3Image& image) {
	// Consider grayscale image
	vigra::FImage image_gray(image.width(), image.height());
	vigra::transformImage(srcImageRange(image), destImage(image_gray),
		VectorMeanTransformAccessor<vigra::FVector3Image::Accessor>());

	// 1. Compute canny edge image
	vigra::BImage canny(image_gray.width(), image_gray.height());
	canny = 0;
	vigra::cannyEdgeImage(srcImageRange(image_gray), destImage(canny),
		1, 15, 255);	// scale, threshold, edgevalue
	//vigra::exportImage(srcImageRange(canny), "canny.png");

	// 2. Get gradient magnitude and orientation of original image
	vigra::FVector2Image gradient(canny.width(), canny.height());
	vigra::gradientBasedTransform(srcImageRange(image_gray),
		destImage(gradient),
		MagnitudeOrientationGradientFunctor<float>(undirected_edges));

	// 3. Produce matrices: histogram bin and gradient magnitude for each pixel
	bin_image.resize(gradient.height(), gradient.width());
	gmm::fill(bin_image, 0);
	bin_value.resize(gradient.height(), gradient.width());
	gmm::fill(bin_value, 0);
	for (int y = 0; y < gradient.height(); ++y) {
		for (int x = 0; x < gradient.width(); ++x) {
			// No edge -> skip
			if (canny(x, y) == 0)
				continue;

			// Edge: calculate bin index and gradient magnitude
			double magnitude = gradient(x, y)[0];
			double orientation = gradient(x, y)[1];	// range 0 to 1.
			assert(orientation >= 0.0 && orientation <= 1.0);
			if (orientation >= 1.0)
				orientation = 0.0;
			orientation *= static_cast<double>(bin_count);
			int orientation_index = static_cast<int>(orientation) % bin_count;

			// Bin value of zero denotes no edge.
			bin_image(y, x) = orientation_index + 1;
			bin_value(y, x) = magnitude;
		}
	}
}
Code example #5
void PreviewColorPickerTool::CalcCorrectionForImage(unsigned int i,vigra::Point2D pos)
{
    const HuginBase::SrcPanoImage & img = helper->GetPanoramaPtr()->getImage(i);
    HuginBase::ImageCache::ImageCacheRGB8Ptr cacheImage8 = HuginBase::ImageCache::getInstance().getImage(img.getFilename())->get8BitImage();

    //copy only region to be inspected
    vigra::BRGBImage tempImage(2*ColorPickerSize,2*ColorPickerSize);
    vigra::copyImage(vigra::make_triple((*cacheImage8).upperLeft() + pos + vigra::Point2D(-ColorPickerSize,-ColorPickerSize),
                                        (*cacheImage8).upperLeft() + pos + vigra::Point2D( ColorPickerSize, ColorPickerSize),
                                        vigra::BRGBImage::Accessor()),
                     destImage(tempImage) );

    //now apply photometric corrections
    HuginBase::Photometric::InvResponseTransform<vigra::UInt8, double> invResponse(img);
    if (helper->GetPanoramaPtr()->getOptions().outputMode == HuginBase::PanoramaOptions::OUTPUT_LDR)
    {
        // select exposure and response curve for LDR output
        std::vector<double> outLut;
        vigra_ext::EMoR::createEMoRLUT(helper->GetPanoramaPtr()->getImage(0).getEMoRParams(), outLut);
        vigra_ext::enforceMonotonicity(outLut);
        invResponse.setOutput(1.0/pow(2.0,helper->GetPanoramaPtr()->getOptions().outputExposureValue), outLut,
                              255.0);
    }
    else
    {
        invResponse.setHDROutput(true,1.0/pow(2.0,helper->GetPanoramaPtr()->getOptions().outputExposureValue));
    }
    vigra::DRGBImage floatTemp(tempImage.size());
    vigra_ext::transformImageSpatial(srcImageRange(tempImage), destImage(floatTemp), invResponse, vigra::Diff2D(pos.x-ColorPickerSize,pos.y-ColorPickerSize));

    //calculate average
    vigra::FindAverage<vigra::DRGBImage::PixelType> average;
    vigra::inspectImage(srcImageRange(floatTemp), average);
    //range check
    vigra::RGBValue<double> RGBaverage=average.average();
    if(RGBaverage[0]>2 && RGBaverage[0]<253 &&
       RGBaverage[1]>2 && RGBaverage[1]<253 &&
       RGBaverage[2]>2 && RGBaverage[2]<253)
    {
        m_red+=RGBaverage[0]/RGBaverage[1];
        m_blue+=RGBaverage[2]/RGBaverage[1];
        m_count++;
    };
};
Code example #6
void GCOutputBoykovWorker::run()
{
  const int width = m_SourceImage.width();
  const int height = m_SourceImage.height();
  const int nLabels = 2;
  cv::Mat src(height, width, CV_8UC3, m_SourceImage.bits(), m_SourceImage.bytesPerLine());

  // Allocate graph
  int num_nodes = width * height;
  int num_edges = (width-1)*height + width*(height-1);
  typedef Graph<int,int,int> GraphType;
  GraphType* graph = new GraphType(num_nodes, num_edges);
  graph->add_node(num_nodes);

  // Initialize Data Term
  generateDataTerm([graph,width](int x, int y, int dterm1, int dterm2){
    GraphType::node_id node = y * width + x;
    graph->add_tweights(node, dterm1, dterm2);
  });

  // Initialize Smoothness Term
  generateSmoothTerm([graph,width](int x1, int y1, int x2, int y2, int cap1, int cap2){
    GraphType::node_id node1, node2;
    node1 = y1 * width + x1, node2 = y2 * width + x2;
    graph->add_edge(node1, node2, cap1, cap2);
  });

  // Compute
  int flow = graph->maxflow();
  qDebug("Flow = %d", flow);

  // Read Result
  QImage destImage(width, height, QImage::Format_RGB888);
  cv::Mat dst(height, width, CV_8UC3, destImage.bits(), destImage.bytesPerLine());
  for (int y = 0; y < dst.rows; ++y) {
    for (int x = 0; x < dst.cols; ++x) {
      GraphType::node_id node = y * width + x;

      if (graph->what_segment(node) == GraphType::SOURCE) {
        dst.at<cv::Vec3b>(y,x)[0] = 255;
        dst.at<cv::Vec3b>(y,x)[1] = 255;
        dst.at<cv::Vec3b>(y,x)[2] = 255;
      } else {
        dst.at<cv::Vec3b>(y,x)[0] = 0;
        dst.at<cv::Vec3b>(y,x)[1] = 0;
        dst.at<cv::Vec3b>(y,x)[2] = 0;
      }
    }
  }

  delete graph;

  emit completed(destImage);
}
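The graph construction above follows the Boykov-Kolmogorov maxflow library's API: add_node() allocates the pixel nodes, add_tweights() sets the data term, and add_edge() the smoothness term. A minimal two-node sketch of that API, assuming the library's graph.h header is available (all capacities are illustrative):

// Sketch: two-node graph cut with the BK maxflow API (illustrative values).
#include <cstdio>
#include "graph.h"

int main() {
  typedef Graph<int, int, int> GraphType;
  GraphType g(/*estimated nodes*/ 2, /*estimated edges*/ 1);
  g.add_node(2);
  g.add_tweights(0, /*cap to source*/ 5, /*cap to sink*/ 1);   // data term, node 0
  g.add_tweights(1, 1, 5);                                     // data term, node 1
  g.add_edge(0, 1, /*cap*/ 3, /*reverse cap*/ 3);              // smoothness term
  int flow = g.maxflow();
  std::printf("flow = %d, node 0 in %s\n", flow,
              g.what_segment(0) == GraphType::SOURCE ? "SOURCE" : "SINK");
  return 0;
}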
Code example #7
File: mainwindow.cpp Project: Damyanka/medianFilter
void MainWindow::applyMedianFilter()
{    
    int filterRadius = ui->lineEdit->text().toInt();
    if (filterRadius > 30 || filterRadius <= 0)
    {
        QMessageBox::warning(this, tr("Incorrect radius"), tr("Please choose radius between 1 and 30"), QMessageBox::Ok);
        return;
    }

    if(!ui->labelImage->pixmap())
    {
        QMessageBox::warning(this, tr("No image"), tr("Please select image."), QMessageBox::Ok);
        return;
    }

    int imageHeight = image.height(), imageWidth = image.width();
    //qDebug()<<"image size"<<image.height()<<image.width();

    if (filterRadius > imageHeight/2 || filterRadius > imageWidth/2)
    {
        QMessageBox::warning(this, tr("Incorrect radius"), tr("The chosen filter radius is too big for this image!"), QMessageBox::Ok);
        return;
    }

    MedianFilter medianFilter;
    element* resImageBits;
    resImageBits = new element[imageHeight * imageWidth];
    if (!resImageBits)
        return;

    medianFilter.applyMedianFilter((element*)image.bits(), resImageBits, imageHeight, imageWidth, filterRadius);
    if(resImageBits)
    {
        QImage destImage((uchar*)resImageBits, imageWidth, imageHeight, image.format());
        QPixmap pixRes;
        pixRes.convertFromImage(destImage);
        ui->labelMedianFilterResult->setPixmap(pixRes.scaled(MAXSIDE, MAXSIDE, Qt::KeepAspectRatio));
    }
    else
        qDebug()<<"median filter failed";
    delete [] resImageBits;

    /*
    QImage imageTestGray8 = ui->labelImage->pixmap()->toImage().convertToFormat(QImage::Format_Grayscale8);
    QImage destImageTestGray8;
    qDebug()<<imageTestGray8<<imageTestGray8.pixelFormat().channelCount() <<imageTestGray8.pixelFormat().bitsPerPixel();
    QPixmap pixmapRezGray8;
    pixmapRezGray8.convertFromImage(destImageTestGray8);
    ui->labelMedianFilterResult->setPixmap(pixmapRezGray8);

    // ctmf(image.bits(), destImage.bits(), image.width(), image.height(),
    //    image.pixelFormat().bitsPerPixel(), image.pixelFormat().bitsPerPixel(), image.pixelFormat().channelCount(), filterRadius, 256*1024);
    */
}
Code example #8
bool RotoCanvas::exportFrame(QDir destinationDir, const QString &sequenceName, const char *fileFormat, int frameNumber)
{
    bool result=false;
    QString formatString=QString(fileFormat).toLower();
    if (this->loadedFI!=nullptr) {
        QString destPath=getFramePath(frameNumber);//destinationDir.filePath(sequenceName+RotoCanvas::getZeroPadded(frameNumber, this->getSeqDigitCount())+"."+QString(fileFormat));
        QImage destImage(originalImage.size(), QImage::Format_ARGB32);
        destImage.fill(qRgba(0,0,0,0));
        QPainter destPainter(&destImage);
        for (int i=0; i<layerCount; i++) {
            if (i<layerPtrs.length()) {
                if (layerPtrs[i]!=nullptr) {
                    destPainter.drawImage(0,0,layerPtrs[i]->image);
                }
            }
        }
        //(formatString=="png")?QImage::Format_ARGB32:QImage::Format_RGB888
        result=destImage.save(destPath, fileFormat);
    }
    return result;
}
Code example #9
void CViGrA_Watershed::Segmentation(TImage_In &Input, TImage_Out &Output, double Scale, bool bEdges)
{
	typedef typename vigra::NumericTraits<typename TImage_In::value_type>::RealPromote	TmpType;

	vigra::BasicImage<TmpType>	gradientx	(Get_NX(), Get_NY());
	vigra::BasicImage<TmpType>	gradienty	(Get_NX(), Get_NY());
	vigra::FImage				gradientmag	(Get_NX(), Get_NY());
	vigra::IImage				labels		(Get_NX(), Get_NY());

	//-----------------------------------------------------
	// calculate the x- and y-components of the image gradient at given scale
	Process_Set_Text(_TL("calculate gradients"));

	recursiveFirstDerivativeX	(srcImageRange(Input)		, destImage(gradientx), Scale);
	recursiveSmoothY			(srcImageRange(gradientx)	, destImage(gradientx), Scale);

	recursiveFirstDerivativeY	(srcImageRange(Input)		, destImage(gradienty), Scale);
	recursiveSmoothX			(srcImageRange(gradienty)	, destImage(gradienty), Scale);

	//-----------------------------------------------------
	// transform components into gradient magnitude
	Process_Set_Text(_TL("calculate gradient magnitude"));

	combineTwoImages(
		srcImageRange(gradientx),
		srcImage(gradienty),
		destImage(gradientmag),
		GradientSquaredMagnitudeFunctor()
	);

	//-----------------------------------------------------
	// find the local minima of the gradient magnitude (might be larger than one pixel)
	Process_Set_Text(_TL("find local minima"));

	labels	= 0;

	extendedLocalMinima(srcImageRange(gradientmag), destImage(labels), 1);

	//-----------------------------------------------------
	// label the minima just found
	Process_Set_Text(_TL("label minima"));

	int max_region_label	= labelImageWithBackground(srcImageRange(labels), destImage(labels), false, 0);

	//-----------------------------------------------------
	// create a statistics functor for region growing
	vigra::ArrayOfRegionStatistics<vigra::SeedRgDirectValueFunctor<float> > gradstat(max_region_label);

	//-----------------------------------------------------
	// perform region growing, starting from the minima of the gradient magnitude;
	// as the feature (first input) image contains the gradient magnitude,
	// this calculates the catchment basin of each minimum
	Process_Set_Text(_TL("perform region growing"));

	seededRegionGrowing(srcImageRange(gradientmag), srcImage(labels), destImage(labels), gradstat);

	//-----------------------------------------------------
	// initialize a functor to determine the average gray-value or color for each region (catchment basin) just found
	vigra::ArrayOfRegionStatistics<vigra::FindAverage<TmpType> > averages(max_region_label);

	//-----------------------------------------------------
	// calculate the averages
	Process_Set_Text(_TL("calculate averages"));

	inspectTwoImages(srcImageRange(Input), srcImage(labels), averages);

	//-----------------------------------------------------
	// write the averages into the destination image (the functor 'averages' acts as a look-up table)
	transformImage(srcImageRange(labels), destImage(Output), averages);

	//-----------------------------------------------------
	// mark the watersheds (region boundaries) black
	if( bEdges )
	{
		regionImageToEdgeImage(srcImageRange(labels), destImage(Output), vigra::NumericTraits<typename TImage_Out::value_type>::zero());
	}
}
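The GradientSquaredMagnitudeFunctor used in the combineTwoImages() step is defined elsewhere in the module; for reference, a minimal version in the spirit of VIGRA's own watershed example might look like this (illustrative sketch, not the project's actual definition):

// Sketch: a gradient-squared-magnitude functor for combineTwoImages() (illustrative).
struct GradientSquaredMagnitudeFunctor
{
	// scalar gradient components
	float operator()(float const & g1, float const & g2) const
	{
		return g1 * g1 + g2 * g2;
	}

	// RGB gradient components
	float operator()(vigra::RGBValue<float> const & rg1, vigra::RGBValue<float> const & rg2) const
	{
		return rg1.squaredMagnitude() + rg2.squaredMagnitude();
	}
};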
Code example #10
File: storm.cpp Project: ukoethe/simple-STORM
// MAIN
int main(int argc, char* argv[]) {

    try
    {
        DataParams params(argc, argv);

        params.doSanityChecks();
        int stacksize = params.shape(2);
        Size2D size2 (params.shape(0), params.shape(1)); // isn't there a slicing operator?


        if(params.getVerbose()) {
            std::cout << "Images with Shape: " << params.shape() << std::endl;
            std::cout << "Processing a stack of " << stacksize << " images..." << std::endl;
        }


        // Detected spots: one vector entry per image in the stack;
        // the inner set contains all spots found in that image.
        std::vector<std::set<Coord<float> > > res_coords(stacksize);

        DImage res((size2-Diff2D(1,1))*params.getFactor()+Diff2D(1,1));
        // check if outfile is writable, otherwise throw error -> exit
        exportImage(srcImageRange(res), ImageExportInfo(params.getOutFile().c_str()));
        std::ofstream cf;
        if(!params.getCoordsFile().empty()) {
            cf.open(params.getCoordsFile());
            vigra_precondition(cf.is_open(), "Could not open coordinate-file for writing.");
        }

        USE_NESTED_TICTOC;
        //USETICTOC;
        TICPUSH;  // measure the time

        // STORM algorithm

        CliProgressFunctor func;
        wienerStorm(params, res_coords, func);

        // resulting image
        drawCoordsToImage<Coord<float> >(res_coords, res);

        int numSpots = 0;
        if(cf.is_open()) {
            numSpots = saveCoordsFile(params, cf, res_coords);
            cf.close();
        }

        // end: done.
        std::cout << std::endl << TOCS << std::endl;
        std::cout << "detected " << numSpots << " spots." << std::endl;

        // some maxima are very strong, so we clip the output values to an appropriate range:
        double maxlim = 0., minlim = 0.;
        findMinMaxPercentile(res, 0., minlim, 0.996, maxlim);
        std::cout << "cropping output values to range [" << minlim << ", " << maxlim << "]" << std::endl;
        if(maxlim > minlim) {
            transformImage(srcImageRange(res), destImage(res), ifThenElse(Arg1()>Param(maxlim), Param(maxlim), Arg1()));
        }
        exportImage(srcImageRange(res), ImageExportInfo(params.getOutFile().c_str()));
        if (!params.getSettingsFile().empty())
            params.save();


    }
    catch (vigra::StdException & e)
    {
        std::cout<<"There was an error:"<<std::endl;
        std::cout << e.what() << std::endl;
        return 1;
    }

    return 0;
}
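The clipping step above uses VIGRA's functor-expression facilities (Arg1, Param, ifThenElse from <vigra/functorexpression.hxx>). A minimal standalone sketch of the same idiom (the clipAbove helper is illustrative, not part of simple-STORM):

// Sketch: clipping an image at a maximum value with VIGRA functor expressions (illustrative).
#include <vigra/stdimage.hxx>
#include <vigra/transformimage.hxx>
#include <vigra/functorexpression.hxx>

void clipAbove(vigra::DImage & img, double maxlim)
{
    using namespace vigra::functor;
    // replace every pixel larger than maxlim by maxlim, leave others unchanged
    vigra::transformImage(vigra::srcImageRange(img), vigra::destImage(img),
                          ifThenElse(Arg1() > Param(maxlim), Param(maxlim), Arg1()));
}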
Code example #11
File: features.cpp Project: WangDequan/cs4670
// Compute MOPs descriptors.
void ComputeMOPSDescriptors(CFloatImage &image, FeatureSet &features)
{
	int w = image.Shape().width;  // image width
	int h = image.Shape().height; // image height

	// Create grayscale image used for Harris detection
	CFloatImage grayImage=ConvertToGray(image);

	// Apply a 7x7 gaussian blur to the grayscale image
	CFloatImage blurImage(w,h,1);
	Convolve(grayImage, blurImage, ConvolveKernel_7x7);

	// Transform matrices
	CTransform3x3 xform;
	CTransform3x3 trans1;
	CTransform3x3 rotate;
	CTransform3x3 scale;
	CTransform3x3 trans2;

	// Declare additional variables
	float pxl;					// pixel value
	double mean, sq_sum, stdev; // variables for normalizing the data set

	// This image represents the window around the feature you need to compute to store as the feature descriptor
	const int windowSize = 8;
	CFloatImage destImage(windowSize, windowSize, 1);

	for (vector<Feature>::iterator i = features.begin(); i != features.end(); i++) {
		Feature &f = *i;

		// Compute the transform from each pixel in the 8x8 image to sample from the appropriate 
		// pixels in the 40x40 rotated window surrounding the feature
		trans1 = CTransform3x3::Translation(f.x, f.y);						// translate window to feature point
		rotate = CTransform3x3::Rotation(f.angleRadians * 180.0 / PI);		// rotate window by angle
		scale = CTransform3x3::Scale(5.0);									// scale window by 5
		trans2 = CTransform3x3::Translation(-windowSize/2, -windowSize/2);	// translate window to origin

		// transform resulting from combining above transforms
		xform = trans1*scale*rotate*trans2;

		//Call the Warp Global function to do the mapping
		WarpGlobal(blurImage, destImage, xform, eWarpInterpLinear);

		// Resize data field for a 8x8 square window
		f.data.resize(windowSize * windowSize);	

		// Find mean of window
		mean = 0;
		for (int y = 0; y < windowSize; y++) {
			for (int x = 0; x < windowSize; x++) {
				pxl = destImage.Pixel(x, y, 0);
				f.data[y*windowSize + x] = pxl;
				mean += pxl/(windowSize*windowSize);
			}
		}

		// Find standard deviation of window
		sq_sum = 0;
		for (int k = 0; k < windowSize*windowSize; k++) {
			sq_sum += (mean - f.data[k]) * (mean - f.data[k]);
		}
		stdev = sqrt(sq_sum/(windowSize*windowSize));

		// Normalize window to have 0 mean and unit variance by subtracting
		// by mean and dividing by standard deviation
		for (int k = 0; k < windowSize*windowSize; k++) {
			f.data[k] = (f.data[k]-mean)/stdev;
		}
	}
}
Code example #12
void CenterHorizontally::centerHorizontically(PanoramaData& panorama)
{
    vigra::Size2D panoSize(360,180);
    
    // remap into miniature pano.
    PanoramaOptions opts;
    opts.setHFOV(360);
    opts.setProjection(PanoramaOptions::EQUIRECTANGULAR);
    opts.setWidth(360);
    opts.setHeight(180);
    
    // remap image
    vigra::BImage panoAlpha(panoSize);
    Nona::RemappedPanoImage<vigra::BImage, vigra::BImage> remapped;
    
    // use selected images.
    const UIntSet allActiveImgs(panorama.getActiveImages());

    if (allActiveImgs.empty())
    {
        // do nothing if there are no images
        return;
    }
    
    //only check unlinked images
    UIntSet activeImgs;
    for (UIntSet::const_iterator it = allActiveImgs.begin(); it!= allActiveImgs.end(); ++it)
    {
        const SrcPanoImage & img=panorama.getImage(*it);
        bool consider=true;
        if(img.YawisLinked())
        {
            for(UIntSet::const_iterator it2=activeImgs.begin(); it2!=activeImgs.end(); ++it2)
            {
                if(img.YawisLinkedWith(panorama.getSrcImage(*it2)))
                {
                    consider=false;
                    break;
                };
            };
        };
        if(consider)
            activeImgs.insert(*it);
    };

    for (UIntSet::iterator it = activeImgs.begin(); it != activeImgs.end(); ++it)
    {
        remapped.setPanoImage(panorama.getSrcImage(*it), opts, vigra::Rect2D(0,0,360,180));
        // calculate alpha channel
        remapped.calcAlpha();
        // copy into global alpha channel.
        vigra::copyImageIf(vigra_ext::applyRect(remapped.boundingBox(),
                                                vigra_ext::srcMaskRange(remapped)),
                           vigra_ext::applyRect(remapped.boundingBox(),
                                                vigra_ext::srcMask(remapped)),
                           vigra_ext::applyRect(remapped.boundingBox(),
                                                destImage(panoAlpha)));
    }
    
    // get field of view
    std::vector<int> borders;
    bool colOccupied = false;
    for (int h=0; h < 360; h++) {
        bool curColOccupied = false;
        for (int v=0; v< 180; v++) {
            if (panoAlpha(h,v)) {
                // pixel is valid
                curColOccupied = true;
            }
        }
        if ((colOccupied && !curColOccupied) ||
            (!colOccupied && curColOccupied))
        {
            // change in position, save point.
            borders.push_back(h-180);
            colOccupied = curColOccupied;
        }
    }
    
    
    int lastidx = borders.size() -1;
    if (lastidx == -1) {
        // empty pano
        return;
    }
    
    if (colOccupied) {
        // we have reached the right border, and the pano is still valid
        // shift right fragments by 360 deg
        // |11    2222|  -> |      222211     |
        std::vector<int> newBorders;
        newBorders.push_back(borders[lastidx]);
        for (int i = 0; i < lastidx; i++) {
            newBorders.push_back(borders[i]+360);
        }
        borders = newBorders;
    }
    
    const double dYaw = (borders[0] + borders[lastidx]) / 2.0;
    
    // apply yaw shift, takes also translation parameters into account
    RotatePanorama(panorama, -dYaw, 0, 0).run();
}