// Warp the stored image with the 2x2 affine matrix [[c00 c01], [c10 c11]],
// padding exposed areas with backgroundColor, then re-centre the picture's
// offsets so the image midpoint maps back to the origin.
void OpenCVPicture::affineTransform(float c00, float c01, float c10,
                                    float c11) {
  transformImage(mat, backgroundColor, c00, c01, c10, c11);
  yOffset = -mat.rows / 2; // re-centre on the transformed image
  xOffset = -mat.cols / 2;
}
// Read the image from `filename`, then scale it uniformly so that its
// shorter side is `scale` pixels, and centre the offsets on the result.
void OpenCVPicture::loadData(int scale, int flags) {
  readImage(filename, mat, flags);
  const int shortSide = std::min(mat.rows, mat.cols);
  const float factor = static_cast<float>(scale) / shortSide;
  transformImage(mat, backgroundColor, factor, 0, 0, factor);
  xOffset = -mat.cols / 2; // re-centre on the scaled image
  yOffset = -mat.rows / 2;
}
// Load the raw image, then resize it uniformly so that its shorter side
// equals `scale` pixels, and centre the offsets on the resized image.
void OpenCVPicture::loadData(int scale, int flags) {
    loadDataWithoutScaling(flags);
    const float factor =
        scale * 1.0f / std::min(mat.rows, mat.cols); // shorter side -> scale px
    transformImage(mat, backgroundColor, factor, 0, 0, factor);
    yOffset = -mat.rows / 2; // re-centre on the scaled image
    xOffset = -mat.cols / 2;
}
 // Construct an MDI child window showing a face image; if the face has no
 // label yet, an inline QLineEdit is added below the image for entering one.
 // NOTE(review): `w` is read below but never assigned here — presumably a
 // member initialized elsewhere (possibly by transformImage()); confirm
 // against the class definition. `h` is 0 when image->setGeometry runs.
 MdiChild::MdiChild(QMdiArea* parent,face* FaceObject)
 {
	 prnt = parent;
	 h = 0;
     setAttribute(Qt::WA_DeleteOnClose);
	 this->FaceObject = FaceObject;
	 
	 

	 // Display the (scaled) face picture; transformImage() supplies the pixmap.
	 image = new QLabel(this);
	 image->setScaledContents(true);
//	 image->setAlignment(Qt::AlignTop);
	 image->setPixmap(*transformImage());
	 image->setGeometry(0,0,w,h);

	 // Unlabeled faces get an edit box directly under the image.
	 if(FaceObject->getLabel() == ""){
		labelTextEdit = new QLineEdit(this);
//		labelTextEdit->setAlignment(Qt::AlignBottom);
		labelTextEdit->setGeometry(0,h,w,20);
		labelTextEdit->setText("add label");
		//labelTextEdit->addAction("add label");
		h +=20; // account for the edit box in the window height
	 }
	 
	 // Right-click emits customContextMenuRequested instead of a default menu.
	 this->setContextMenuPolicy(Qt::CustomContextMenu);
	 

 }
Beispiel #5
0
// Rotate the displayed image 5 degrees anti-clockwise and refresh the view.
void ImagePane::rotateAntiClockwise()
{
    rotation -= 5;
    if (rotation < 0) {
        rotation += 360; // wrap back into [0, 360)
    }
    transformImage(); // rebuild the cached transform with the new angle
    update();         // schedule a repaint
}
Beispiel #6
0
// Rotate the displayed image 5 degrees clockwise and refresh the view.
void ImagePane::rotateClockwise()
{
    rotation += 5;
    if (rotation >= 360) {
        rotation -= 360; // wrap back into [0, 360)
    }
    transformImage(); // rebuild the cached transform with the new angle
    update();         // schedule a repaint
}
Beispiel #7
0
// Shrink the displayed image to 3/4 of its current scale, unless the zoom
// factor has already dropped to its lower bound.
void ImagePane::zoomOut()
{
    if (zoom <= 0.1)
        return; // already at minimum zoom — do nothing
    zoom *= 0.75;
    transformImage(); // rebuild the cached transform with the new scale
    update();         // schedule a repaint
}
Beispiel #8
0
// Enlarge the displayed image by one third (x 4/3), unless the zoom factor
// has already reached its upper bound.
void ImagePane::zoomIn()
{
    if (zoom >= 5.0)
        return; // already at maximum zoom — do nothing
    zoom *= 4.0 / 3.0;
    transformImage(); // rebuild the cached transform with the new scale
    update();         // schedule a repaint
}
Beispiel #9
0
// Restore the default view: 1:1 zoom, no rotation, index numbers hidden.
// The working image is re-seeded from the unmodified current image and then
// shrunk step by step until it fits the available desktop geometry.
void ImagePane::reset()
{
    zoom = 1;
    rotation = 0;
    indexNumbersVisible = false;
    transformedImage = currentImage;
    const QRect res = QApplication::desktop()->availableGeometry();
    // NOTE(review): zoomOut() stops shrinking once zoom reaches its lower
    // bound (<= 0.1); if the image still exceeds the desktop at that point
    // this loop would not terminate — confirm that bound cannot be hit for
    // realistic image sizes.
    while(transformedImage.height() > res.height() ||
          transformedImage.width() > res.width())
    {
        zoomOut();
    }
    //in case no resizing took place, we still need to ensure that
    //private member transform is updated, so...
    transformImage();
    update();
    show();
}
// Produce a randomly distorted copy of this picture for data augmentation.
// For TRAINBATCH the copy receives a random affine warp (independent x/y
// stretch, optional horizontal flip, and one of slant / opposite slant /
// small rotation) followed by a random positional jiggle; other batch types
// get an undistorted copy. Caller takes ownership of the returned object.
Picture* OpenCVPicture::distort(RNG& rng, batchType type) {
  OpenCVPicture* pic=new OpenCVPicture(*this);
  pic->loadData();
  if (type==TRAINBATCH) {
    float
      c00=1, c01=0,  //2x2 identity matrix---starting point for calculating affine distortion matrix
      c10=0, c11=1;
    c00*=1+rng.uniform(-0.2,0.2); // x stretch
    c11*=1+rng.uniform(-0.2,0.2); // y stretch
    if (rng.randint(2)==0) c00*=-1; //Horizontal flip
    int r=rng.randint(3); // pick exactly one shear/rotation distortion
    float alpha=rng.uniform(-0.2,0.2);
    if (r==0) matrixMul2x2inPlace(c00,c01,c10,c11,1,0,alpha,1); //Slant
    if (r==1) matrixMul2x2inPlace(c00,c01,c10,c11,1,alpha,0,1); //Slant other way
    if (r==2) matrixMul2x2inPlace(c00,c01,c10,c11,cos(alpha),-sin(alpha),sin(alpha),cos(alpha)); //Rotate
    // NOTE(review): transformImage is called directly here, so pic->xOffset /
    // pic->yOffset are NOT re-centred afterwards (unlike affineTransform) —
    // confirm this is intentional or compensated for by jiggle().
    transformImage(pic->mat, backgroundColor, c00, c01, c10, c11);
    pic->jiggle(rng,16); // random translation of up to 16 pixels
  }
  return pic;
}
// Apply `transformMat` to the landmarks of `landmarkData` and, when every
// transformed landmark stays inside the source image bounds, to the image
// itself, producing a new landmark-data object. Returns an empty pointer
// when any transformed landmark falls outside the image.
const ImageLandmarkDataPtr ImageTransformer::TransformDataWithMat(const ImageLandmarkDataPtr& landmarkData,
                                                                  const cv::Mat& transformMat,
                                                                  bool shouldSaveNewImage)
{
  // Predicate: a transformed point must remain within the source image.
  const auto insideImage = [&landmarkData](const PointType& pt) {
    return pt.x >= 0 &&
           pt.x < landmarkData->ImageSource().cols &&
           pt.y >= 0 &&
           pt.y < landmarkData->ImageSource().rows;
  };

  PointArrayPtr mappedLandmarks =
      transformLandmarks(landmarkData->Landmarks(), transformMat, insideImage);

  ImageLandmarkDataPtr result;
  if (mappedLandmarks)
  {
    const cv::Mat warped = transformImage(landmarkData->ImageSource(), transformMat);
    result = createLandmarkData(mappedLandmarks, warped, shouldSaveNewImage);
  }

  return result;
}
/* Mouse-drag handler: dispatches on the current interaction mode (_Mode)
   to translate, rotate, or scale the displayed image. Dragging outside the
   image frame ends the interaction. */
void SpaceTranformWindow::mouseMoveEvent(QMouseEvent* event)
{
    QPoint mousePos = event->pos();

    /* Cursor is inside the image frame */
    if(ui->ShowImage->rect().contains(mousePos))
    {
        switch(_Mode)
        {
        case DONOTHING: /* idle — ignore the move */
        {
            break;
        }
        case POSITION:  /* translation */
        {
            if(mousePos != _vectorStart)
            {
                // Accumulate the drag delta and move the image centre with it.
                _VectorMove += (mousePos - _vectorStart);

                _ShowImageCenter += (mousePos - _vectorStart);

                _vectorStart = mousePos;
                transformImage();
            }
            break;
        }
        case ROTATION:  /* rotation */
        {
            /* angle of the cursor and the increment since the last event */
            double endAngle;
            double angleTurned;

            // Angle of the cursor relative to the image centre.
            endAngle = atan2(mousePos.y()-_ShowImageCenter.y(),
                             mousePos.x()-_ShowImageCenter.x());
            angleTurned = endAngle - _TurnAngle;
            if(angleTurned!=0)
            {
                _TransformAngle += angleTurned;
                _TurnAngle = endAngle;
                transformImage();
            }
            break;
        }
        case RESIZE_LIMITED:    /* uniform (aspect-preserving) scaling */
        {
            // Vectors from the image centre to the drag start and to the
            // current cursor; the ratio of their x components is the scale.
            QPoint strVector(_vectorStart.x()-_ShowImageCenter.x(),
                             _vectorStart.y()-_ShowImageCenter.y());
            QPoint endVector(mousePos.x()-_ShowImageCenter.x(),
                             mousePos.y()-_ShowImageCenter.y());

            if(strVector.x()!=0)
            {
                _Scale_width_ShowImage = static_cast<double>(endVector.x())/strVector.x();
                if(_Scale_width_ShowImage <=0)
                {
                    // Degenerate (mirrored/zero) scale — fall back to 1:1.
                    _Scale_width_ShowImage =1;
                }
            }
            else
            {
                _Scale_width_ShowImage = 1;
            }

            // Keep the aspect ratio: height follows width.
            _Scale_height_ShowImage = _Scale_width_ShowImage;

            transformImage();
            break;
        }
        case RESIZE_UNLIMITED:  /* free (non-uniform) scaling */
        {
            // Same construction as above, but width and height scale
            // independently from the x and y component ratios.
            QPoint strVector(_vectorStart.x()-_ShowImageCenter.x(),
                             _vectorStart.y()-_ShowImageCenter.y());
            QPoint endVector(mousePos.x()-_ShowImageCenter.x(),
                             mousePos.y()-_ShowImageCenter.y());

            if(strVector.x()!=0)
            {
                _Scale_width_ShowImage = static_cast<double>(endVector.x())/strVector.x();
                if(_Scale_width_ShowImage <=0)
                {
                    _Scale_width_ShowImage =1;
                }
            }
            else
            {
                _Scale_width_ShowImage = 1;
            }


            if(strVector.y()!=0)
            {
                _Scale_height_ShowImage = static_cast<double>(endVector.y())/strVector.y();
                if(_Scale_height_ShowImage <=0)
                {
                    _Scale_height_ShowImage =1;
                }
            }
            else
            {
                _Scale_height_ShowImage = 1;
            }

            transformImage();
            break;
        }
        }
    }

    /* Cursor left the image frame: finish the gesture and go idle */
    else
    {
        // NOTE(review): mouseReleaseEvent() is invoked directly with a move
        // event — confirm the release handler tolerates this.
        mouseReleaseEvent(event);
        _Mode = DONOTHING;
    }
}
Beispiel #13
0
// Locate the screen in the current camera frame by HSV colour thresholding
// and contour approximation, then return the perspective-corrected screen
// image. On failure `error` is populated and the raw frame `img` is
// returned instead.
// NOTE(review): despite its name this method returns an image (the result
// of transformImage), not a matrix — confirm the intended contract.
Mat ScreenDetector::getTransformationMatrix(Error& error)
{
    bool approxFound = false;

    // convert image to HSV so the target colour can be isolated by hue
    cvtColor(img, hsv, CV_BGR2HSV);

    // threshold the image: keep pixels within [hsvMin, hsvMax]
    inRange(hsv, hsvMin, hsvMax, thresholded);

    // Optimize threshold by reducing noise: open (erode+dilate) removes
    // speckles, close (dilate+erode) fills small holes, then blur smooths.
    erode(thresholded, thresholded, getStructuringElement(MORPH_ELLIPSE, Size(erodeDilateSize, erodeDilateSize)) );
    dilate( thresholded, thresholded, getStructuringElement(MORPH_ELLIPSE, Size(erodeDilateSize, erodeDilateSize)) );
    dilate( thresholded, thresholded, getStructuringElement(MORPH_ELLIPSE, Size(erodeDilateSize, erodeDilateSize)) );
    erode(thresholded, thresholded, getStructuringElement(MORPH_ELLIPSE, Size(erodeDilateSize, erodeDilateSize)) );
    GaussianBlur(thresholded, thresholded, Size(3,3), 0);

    // findContours modifies its input, so work on a copy
    Mat forContours;
    thresholded.copyTo(forContours);
    // find all contours
    Contours contours;
    Contour approximatedScreen;
    findContours(forContours, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
    int nbContours = contours.size();
    cout << nbContours << " contours found, debug: " << DEBUG << endl;

    if(nbContours == 0)
    {
        error.setError("Unable to find the screen",
                       "The camera doesn't detect any screen or green element."
                       "Please check if your screen is turned on and directed toward the screen");
        return img;
    }

    // largest contours first (contour_compare_area orders by area)
    sort(contours.begin(), contours.end(), contour_compare_area);

    // find the contour with the biggest area that have 4 points when approximated
    for(int i=0; i < nbContours; ++i)
    {
        approxPolyDP(contours.at(i), approximatedScreen, approximateEpsilon * arcLength(contours.at(i), true), true);
        // our screen has 4 point when approximated (a quadrilateral)
        if(approximatedScreen.size() == 4)
        {
            approxFound = true;
            break;
        }
    }

    if(!approxFound)
    {
        error.setError("Unable to find the screen properly",
                       "It seems that the screen is not fully detectable by the camera. Try to reduce light in your room");
        return img;
    }

    // Visual debugging: show the detected quad and the thresholded mask.
    if(DEBUG)
    {
        namedWindow("debug", WINDOW_KEEPRATIO);
        namedWindow("thresholded_calibration", WINDOW_KEEPRATIO);
        Mat debug = Mat::zeros(img.rows, img.cols, CV_8UC3);
        polylines(debug, approximatedScreen, true, Scalar(0,0,255), 3);
        imshow("debug", debug);
        imshow("thresholded_calibration", thresholded);
    }

    // warp the detected quadrilateral to the output image
    return transformImage(approximatedScreen);
}
/* Batch-normalize the face images listed in an eye-coordinates file.
 * Each line of args->eyeFile reads "<name> x1 y1 x2 y2"; the image
 * <inputDir>\<name>.pgm is loaded, optionally histogram-equalized and
 * pixel-normalized, geometrically aligned so the eyes land on the
 * configured destination coordinates, optionally noised, masked and
 * edge-smoothed, then written out in the requested formats
 * (.nrm / .pgm / .sfi).
 * NOTE(review): paths are built with '\\' separators (Windows-only) via
 * unbounded sprintf, and the fgets return value is ignored (EOF is only
 * detected via feof afterwards) — confirm these are acceptable here. */
void convertImages(Arguments* args){
    char** mask = NULL;
    TwoPoints source, dest;
    FILE* eyeList;
    char line[ FILE_LINE_LENGTH ];
    char filename[MAX_FILENAME_LENGTH];
    char imagename[MAX_FILENAME_LENGTH];
    char suffix[MAX_FILENAME_LENGTH];
    int i;

    scaleArgs(args, args->scale);

    /* destination eye coordinates every face is aligned to */
    dest.x1 = args->eyeLx;
    dest.y1 = args->eyeLy;
    dest.x2 = args->eyeRx;
    dest.y2 = args->eyeRy;

    /* Prepare file suffix encoding preprocessing settings, blank if not requested */
    if (args->configSuffix) {
        sprintf(suffix,"_%s", imageSuffix(args)); }
    else {
        suffix[0] = '\0'; }	

    if(args->maskType == CL_YES){
        MESSAGE("Creating Mask.");
        mask = generateMask(args->sizeWidth, args->sizeHeight, args->ellipseX, args->ellipseY, args->ellipseA, args->ellipseB);
    }

    eyeList = fopen(args->eyeFile,"r");
    DEBUG_CHECK(eyeList,"Error opening eye coordinates file");

    /* one iteration per line of the eye-coordinates file */
    for(i = 1;;i++){
        Image pgm;
        Image geo;
        Matrix transform;

        fgets(line, FILE_LINE_LENGTH, eyeList);
        if(feof(eyeList)) break;

        if(sscanf(line,"%s %lf %lf %lf %lf",filename, &(source.x1), &(source.y1), &(source.x2), &(source.y2)) != 5){
            printf("Error parsing line %d of eye coordinate file. Exiting...",i);
            exit(1);
        }

        /* shift the eye coordinates if neccessary */
        source.x1 += args->shiftX;
        source.y1 += args->shiftY;
        source.x2 += args->shiftX;
        source.y2 += args->shiftY;

        sprintf(imagename,"%s\\%s.pgm",args->inputDir,filename);

        MESSAGE1ARG("Processing image: %s",filename);

        pgm = readPGMImage(imagename);

        if(args->histType == HIST_PRE){
            DEBUG(1,"   Performing Pre Histogram Equalization.");
            histEqual(pgm,256);
        }

        if(args->preNormType == CL_YES){
            DEBUG(1,"   Performing Pre Pixel Normalization.");
            ZeroMeanOneStdDev(pgm);
        }

        if(args->preEdge){
            smoothImageEdge(pgm, args->preEdge);
        }

        /* align eyes to the destination coordinates (or pass through
           with an identity transform when geometric norm is disabled) */
        if(args->geoType == CL_YES){
            DEBUG(1,"   Performing Geometric Normalization.");
            transform = generateTransform(&source,&dest,args->reflect);
            geo = transformImage(pgm,args->sizeWidth,args->sizeHeight,transform);
        }
        else{
            transform = makeIdentityMatrix(3);
            geo = transformImage(pgm,args->sizeWidth,args->sizeHeight,transform);
        }

        if(args->noise != 0.0){
            DEBUG(1,"   Adding Gausian Noise.");
            gaussianNoise(geo,args->noise);
        }


        if(args->histType == HIST_POST){
            DEBUG(1,"   Performing Post Histogram Equalization.");
            histEqualMask(geo,256, (const char**) mask);
        }

        if(args->nrmType == CL_YES){
            DEBUG(1,"   Performing final value normalization and Applying Mask.");
            ZeroMeanOneStdDevMasked(geo, (const char **) mask);
        }
        else{
            DEBUG(1,"   No Value Normalization. Just Applying Mask.");
            applyMask(geo, (const char **) mask);
        }

        if(args->postEdge){
            smoothImageEdge(geo, args->postEdge);
        }

        /* write the processed image in each requested output format */
        if(args->nrmDir){
            sprintf(imagename,"%s\\%s%s.nrm", args->nrmDir, filename, suffix);
            DEBUG_STRING(1,"   Saving nrm: %s",imagename);
            writeFeretImage(geo,imagename);
        }
        if(args->pgmDir){
            sprintf(imagename,"%s\\%s%s.pgm", args->pgmDir, filename, suffix);
            DEBUG_STRING(1,"   Saving pgm: %s",imagename);
            writePGMImage(geo,imagename,0);
        }
        if(args->sfiDir){
            sprintf(imagename,"%s\\%s%s.sfi", args->sfiDir, filename, suffix);
            DEBUG_STRING(1,"   Saving sfi: %s",imagename);
            writeRawImage(geo,imagename);
        }

        /* release per-image resources before the next iteration */
        freeImage(geo);
        freeImage(pgm);
        freeMatrix(transform);
    }

    fclose(eyeList);

}
Beispiel #15
0
// Reset the zoom factor to 1:1 and refresh the view.
void ImagePane::zoomRestore()
{
    zoom = 1.0;
    transformImage(); // rebuild the cached transform at native scale
    update();         // schedule a repaint
}
Beispiel #16
0
// Reset the rotation angle to zero and refresh the view.
void ImagePane::rotateRestore()
{
    rotation = 0;
    transformImage(); // rebuild the cached transform without rotation
    update();         // schedule a repaint
}
Beispiel #17
0
// MAIN: run the STORM spot-detection pipeline over an image stack.
// Parses parameters, detects spots per frame (wienerStorm), draws them into
// an upscaled result image, optionally writes a coordinates file, clips
// extreme intensities, and exports the result image.
int main(int argc, char* argv[]) {

    try
    {
        DataParams params(argc, argv);

        params.doSanityChecks();
        int stacksize = params.shape(2);
        Size2D size2 (params.shape(0), params.shape(1)); // isn't there a slicing operator?


        if(params.getVerbose()) {
            std::cout << "Images with Shape: " << params.shape() << std::endl;
            std::cout << "Processing a stack of " << stacksize << " images..." << std::endl;
        }


        // found spots. One Vector over all images in stack
        // the inner set contains all spots in the image
        std::vector<std::set<Coord<float> > > res_coords(stacksize);

        // output image is upscaled by getFactor() (subpixel localization grid)
        DImage res((size2-Diff2D(1,1))*params.getFactor()+Diff2D(1,1));
        // check if outfile is writable, otherwise throw error -> exit
        exportImage(srcImageRange(res), ImageExportInfo(params.getOutFile().c_str()));
        std::ofstream cf;
        if(!params.getCoordsFile().empty()) {
            cf.open(params.getCoordsFile());
            vigra_precondition(cf.is_open(), "Could not open coordinate-file for writing.");
        }

        USE_NESTED_TICTOC;
        //USETICTOC;
        TICPUSH;  // measure the time

        // run the STORM algorithm over the whole stack

        CliProgressFunctor func;
        wienerStorm(params, res_coords, func);

        // resulting image: rasterize all detected spot coordinates
        drawCoordsToImage<Coord<float> >(res_coords, res);

        int numSpots = 0;
        if(cf.is_open()) {
            numSpots = saveCoordsFile(params, cf, res_coords);
            cf.close();
        }

        // end: done.
        std::cout << std::endl << TOCS << std::endl;
        std::cout << "detected " << numSpots << " spots." << std::endl;

        // some maxima are very strong so we scale the image as appropriate :
        // clip everything above the 99.6th intensity percentile
        double maxlim = 0., minlim = 0;
        findMinMaxPercentile(res, 0., minlim, 0.996, maxlim);
        std::cout << "cropping output values to range [" << minlim << ", " << maxlim << "]" << std::endl;
        if(maxlim > minlim) {
            transformImage(srcImageRange(res), destImage(res), ifThenElse(Arg1()>Param(maxlim), Param(maxlim), Arg1()));
        }
        exportImage(srcImageRange(res), ImageExportInfo(params.getOutFile().c_str()));
        if (!params.getSettingsFile().empty())
            params.save();


    }
    catch (vigra::StdException & e)
    {
        std::cout<<"There was an error:"<<std::endl;
        std::cout << e.what() << std::endl;
        return 1;
    }

    return 0;
}
// Watershed segmentation via VIGRA: computes the gradient magnitude of the
// input at the given Scale, grows regions (catchment basins) from its local
// minima, and writes each region's average value into Output; when bEdges
// is set, watershed boundaries are additionally marked with zero pixels.
void CViGrA_Watershed::Segmentation(TImage_In &Input, TImage_Out &Output, double Scale, bool bEdges)
{
	// promoted pixel type avoids overflow/precision loss in the gradients
	typedef typename vigra::NumericTraits<typename TImage_In::value_type>::RealPromote	TmpType;

	vigra::BasicImage<TmpType>	gradientx	(Get_NX(), Get_NY());
	vigra::BasicImage<TmpType>	gradienty	(Get_NX(), Get_NY());
	vigra::FImage				gradientmag	(Get_NX(), Get_NY());
	vigra::IImage				labels		(Get_NX(), Get_NY());

	//-----------------------------------------------------
	// calculate the x- and y-components of the image gradient at given scale
	// (recursive derivative in one direction, recursive smoothing in the other)
	Process_Set_Text(_TL("calculate gradients"));

	recursiveFirstDerivativeX	(srcImageRange(Input)		, destImage(gradientx), Scale);
	recursiveSmoothY			(srcImageRange(gradientx)	, destImage(gradientx), Scale);

	recursiveFirstDerivativeY	(srcImageRange(Input)		, destImage(gradienty), Scale);
	recursiveSmoothX			(srcImageRange(gradienty)	, destImage(gradienty), Scale);

	//-----------------------------------------------------
	// transform components into gradient magnitude
	Process_Set_Text(_TL("calculate gradient magnitude"));

	combineTwoImages(
		srcImageRange(gradientx),
		srcImage(gradienty),
		destImage(gradientmag),
		GradientSquaredMagnitudeFunctor()
	);

	//-----------------------------------------------------
	// find the local minima of the gradient magnitude (might be larger than one pixel)
	Process_Set_Text(_TL("find local minima"));

	labels	= 0;	// clear all labels before marking minima

	extendedLocalMinima(srcImageRange(gradientmag), destImage(labels), 1);

	//-----------------------------------------------------
	// label the minima just found (each connected minimum gets a unique id)
	Process_Set_Text(_TL("label minima"));

	int max_region_label	= labelImageWithBackground(srcImageRange(labels), destImage(labels), false, 0);

	//-----------------------------------------------------
	// create a statistics functor for region growing
	vigra::ArrayOfRegionStatistics<vigra::SeedRgDirectValueFunctor<float> >gradstat(max_region_label);

	//-----------------------------------------------------
	// perform region growing, starting from the minima of the gradient magnitude;
	// as the feature (first input) image contains the gradient magnitude,
	// this calculates the catchment basin of each minimum
	Process_Set_Text(_TL("perform region growing"));

	seededRegionGrowing(srcImageRange(gradientmag), srcImage(labels), destImage(labels), gradstat);

	//-----------------------------------------------------
	// initialize a functor to determine the average gray-value or color for each region (catchment basin) just found
	vigra::ArrayOfRegionStatistics<vigra::FindAverage<TmpType> >averages(max_region_label);

	//-----------------------------------------------------
	// calculate the averages
	Process_Set_Text(_TL("calculate averages"));

	inspectTwoImages(srcImageRange(Input), srcImage(labels), averages);

	//-----------------------------------------------------
	// write the averages into the destination image (the functor 'averages' acts as a look-up table)
	transformImage(srcImageRange(labels), destImage(Output), averages);

	//-----------------------------------------------------
	// mark the watersheds (region boundaries) black
	if( bEdges )
	{
		regionImageToEdgeImage(srcImageRange(labels), destImage(Output), vigra::NumericTraits<typename TImage_Out::value_type>::zero());
	}
}