Example No. 1
//--------------------------------------------------------------------------------
void ofxCvGrayscaleImage::absDiff( ofxCvGrayscaleImage& mom,
                                   ofxCvGrayscaleImage& dad ) {

	if( !mom.bAllocated ){
		ofLog(OF_LOG_ERROR, "in absDiff, mom needs to be allocated");	
		return;	
	}
	if( !dad.bAllocated ){
		ofLog(OF_LOG_ERROR, "in absDiff, dad needs to be allocated");	
		return;	
	}	
	if( !bAllocated ){
		ofLog(OF_LOG_NOTICE, "in absDiff, allocating to match dimensions");			
		allocate(mom.getWidth(), mom.getHeight());
	}
									   
    ofRectangle roi = getROI();
    ofRectangle momRoi = mom.getROI();
    ofRectangle dadRoi = dad.getROI();
    if( (momRoi.width == roi.width && momRoi.height == roi.height ) &&
        (dadRoi.width == roi.width && dadRoi.height == roi.height ) )
    {
        cvAbsDiff( mom.getCvImage(), dad.getCvImage(), cvImage );
        flagImageChanged();
    } else {
        ofLog(OF_LOG_ERROR, "in absDiff, images are different sizes");
    }

}
Example No. 2
//--------------------------------------------------------------------------------
void ofxCvGrayscaleImage::absDiff( ofxCvGrayscaleImage& mom,
                                   ofxCvGrayscaleImage& dad ) {

	if( !mom.bAllocated ){
		ofLogError("ofxCvGrayscaleImage") << "absDiff(): first source image (mom) not allocated";
		return;	
	}
	if( !dad.bAllocated ){
		ofLogError("ofxCvGrayscaleImage") << "absDiff(): second source image (dad) not allocated";
		return;	
	}	
	if( !bAllocated ){
		ofLogNotice("ofxCvGrayscaleImage") << "absDiff(): allocating to match dimensions: "
			<< mom.getWidth() << " " << mom.getHeight();
		allocate(mom.getWidth(), mom.getHeight());
	}
									   
    ofRectangle roi = getROI();
    ofRectangle momRoi = mom.getROI();
    ofRectangle dadRoi = dad.getROI();
    if( (momRoi.width == roi.width && momRoi.height == roi.height ) &&
        (dadRoi.width == roi.width && dadRoi.height == roi.height ) )
    {
        cvAbsDiff( mom.getCvImage(), dad.getCvImage(), cvImage );
        flagImageChanged();
    } else {
        ofLogError("ofxCvGrayscaleImage") << "absDiff(): source image size mismatch between first (mom) & second (dad) image";
    }
}
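A minimal usage sketch for the two-source absDiff, e.g. frame differencing against a stored background (hypothetical variable names; a fragment assuming a standard openFrameworks app):

    // somewhere in an ofApp, with members declared as:
    //     ofxCvColorImage     colorImg;
    //     ofxCvGrayscaleImage grayImage, grayBg, grayDiff;
    // and all of them allocated to the same size, e.g. 320x240, in setup()
    grayImage = colorImg;                 // current camera frame, converted to grayscale
    grayDiff.absDiff(grayBg, grayImage);  // per-pixel |background - current frame|
    grayDiff.threshold(30);               // keep only the strong differences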
Example No. 3
//--------------------------------------------------------------------------------
void ofxCvColorImage::convertToGrayscalePlanarImages(ofxCvGrayscaleImage& red, ofxCvGrayscaleImage& green, ofxCvGrayscaleImage& blue){
	if( !bAllocated ){
		ofLogError("ofxCvColorImage") << "convertToGrayscalePlanarImages(): image not allocated";	
		return;	
	}
	
	if( !red.bAllocated ){
		red.allocate(width, height);
	} 
	if( !green.bAllocated ){
		green.allocate(width, height);
	} 
	if( !blue.bAllocated ){
		blue.allocate(width, height);
	} 

	// fetch the ROIs after allocating, so the size check below
	// sees the planes' real dimensions
    ofRectangle roi = getROI();
    ofRectangle redRoi = red.getROI();
    ofRectangle greenRoi = green.getROI();
    ofRectangle blueRoi = blue.getROI();
			
	if( redRoi.width == roi.width && redRoi.height == roi.height &&
        greenRoi.width == roi.width && greenRoi.height == roi.height &&
        blueRoi.width == roi.width && blueRoi.height == roi.height )
    {
        cvCvtPixToPlane(cvImage, red.getCvImage(), green.getCvImage(), blue.getCvImage(), NULL);
        red.flagImageChanged();
        green.flagImageChanged();
        blue.flagImageChanged();
	} else {
        ofLogError("ofxCvColorImage") << "convertToGrayscalePlanarImages(): image size or region of interest mismatch";
	}
}
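A minimal usage sketch for the plane split (hypothetical names; colorImg is an allocated ofxCvColorImage, and the three planes are allocated to the same size so the ROI check above passes):

    ofxCvGrayscaleImage red, green, blue;
    red.allocate(colorImg.getWidth(), colorImg.getHeight());
    green.allocate(colorImg.getWidth(), colorImg.getHeight());
    blue.allocate(colorImg.getWidth(), colorImg.getHeight());
    colorImg.convertToGrayscalePlanarImages(red, green, blue);
    red.draw(0, 0);   // each plane can now be used as an ordinary grayscale image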
Example No. 4
//--------------------------------------------------------------------------------
void ofxCvGrayscaleImage::absDiff( ofxCvGrayscaleImage& mom,
                                   ofxCvGrayscaleImage& dad ) {
    ofRectangle roi = getROI();
    ofRectangle momRoi = mom.getROI();
    ofRectangle dadRoi = dad.getROI();
    if( (momRoi.width == roi.width && momRoi.height == roi.height ) &&
        (dadRoi.width == roi.width && dadRoi.height == roi.height ) )
    {
        cvAbsDiff( mom.getCvImage(), dad.getCvImage(), cvImage );
        flagImageChanged();
    } else {
        ofLog(OF_LOG_ERROR, "in absDiff, images are different sizes");
    }

}
Example No. 5
//--------------------------------------------------------------------------------
void ofxCvColorImage::setFromGrayscalePlanarImages( ofxCvGrayscaleImage& red, ofxCvGrayscaleImage& green, ofxCvGrayscaleImage& blue){
	ofRectangle roi = getROI();
    ofRectangle redRoi = red.getROI();
    ofRectangle greenRoi = green.getROI();
    ofRectangle blueRoi = blue.getROI();
    if( redRoi.width == roi.width && redRoi.height == roi.height &&
        greenRoi.width == roi.width && greenRoi.height == roi.height &&
        blueRoi.width == roi.width && blueRoi.height == roi.height )
    {
         cvCvtPlaneToPix(red.getCvImage(), green.getCvImage(), blue.getCvImage(),NULL, cvImage);
         flagImageChanged();
	} else {
        ofLog(OF_LOG_ERROR, "in setFromGrayscalePlanarImages, ROI/size mismatch");
	}
}
Example No. 6
//--------------------------------------------------------------------------------
void ofxCvColorImage::convertToGrayscalePlanarImage (ofxCvGrayscaleImage& grayImage, int whichPlane){
	if( !bAllocated ){
		ofLogError("ofxCvColorImage") << "convertToGrayscalePlanarImage(): image not allocated";	
		return;	
	}
	
	if( !grayImage.bAllocated ){
		grayImage.allocate(width, height);
	} 
		
	ofRectangle roi = getROI();
    ofRectangle grayRoi = grayImage.getROI();
   
	if( grayRoi.width == roi.width && grayRoi.height == roi.height ){

		switch (whichPlane){
				
			case 0:
				cvCvtPixToPlane(cvImage, grayImage.getCvImage(), NULL, NULL, NULL);
				grayImage.flagImageChanged();
				break;
			case 1:
				cvCvtPixToPlane(cvImage, NULL, grayImage.getCvImage(), NULL, NULL);
				grayImage.flagImageChanged();
				break;
			case 2:
				cvCvtPixToPlane(cvImage, NULL, NULL, grayImage.getCvImage(), NULL);
				grayImage.flagImageChanged();
				break;
		}
			
	} else {
		ofLogError("ofxCvColorImage") << "convertToGrayscalePlanarImages(): image size or region of interest mismatch";
	}
}
Example No. 7
//--------------------------------------------------------------------------------
void ofxCvColorImage::convertToGrayscalePlanarImage (ofxCvGrayscaleImage& grayImage, int whichPlane){
	
	ofRectangle roi = getROI();
    ofRectangle grayRoi = grayImage.getROI();
   
	if( grayRoi.width == roi.width && grayRoi.height == roi.height ){

		switch (whichPlane){
				
			case 0:
				cvCvtPixToPlane(cvImage, grayImage.getCvImage(), NULL, NULL, NULL);
				grayImage.flagImageChanged();
				break;
			case 1:
				cvCvtPixToPlane(cvImage, NULL, grayImage.getCvImage(), NULL, NULL);
				grayImage.flagImageChanged();
				break;
			case 2:
				cvCvtPixToPlane(cvImage, NULL, NULL, grayImage.getCvImage(), NULL);
				grayImage.flagImageChanged();
				break;
		}
			
	} else {
		ofLog(OF_LOG_ERROR, "in convertToGrayscalePlanarImage, ROI/size mismatch");
	}
}
Example No. 8
//--------------------------------------------------------------------------------
void ofxCvColorImage::convertToGrayscalePlanarImages(ofxCvGrayscaleImage& red, ofxCvGrayscaleImage& green, ofxCvGrayscaleImage& blue){
    ofRectangle roi = getROI();
    ofRectangle redRoi = red.getROI();
    ofRectangle greenRoi = green.getROI();
    ofRectangle blueRoi = blue.getROI();
	if( redRoi.width == roi.width && redRoi.height == roi.height &&
        greenRoi.width == roi.width && greenRoi.height == roi.height &&
        blueRoi.width == roi.width && blueRoi.height == roi.height )
    {
        cvCvtPixToPlane(cvImage, red.getCvImage(), green.getCvImage(), blue.getCvImage(), NULL);
        red.flagImageChanged();
        green.flagImageChanged();
        blue.flagImageChanged();
	} else {
        ofLog(OF_LOG_ERROR, "in convertToGrayscalePlanarImages, ROI/size mismatch");
	}
}
Example No. 9
//--------------------------------------------------------------------------------
void ofxCvGrayscaleImage::absDiff( ofxCvGrayscaleImage& mom ) {
    if( matchingROI(getROI(), mom.getROI()) ) {
        cvAbsDiff( cvImage, mom.getCvImage(), cvImageTemp );
        swapTemp();
        flagImageChanged();
    } else {
        ofLog(OF_LOG_ERROR, "in *=, ROI mismatch");
    }
}
Example No. 10
//--------------------------------------------------------------------------------
void ofxCvFloatImage::addWeighted( ofxCvGrayscaleImage& mom, float f ) {
	if( matchingROI(getROI(), mom.getROI()) ) {
        convertGrayToFloat(mom.getCvImage(), cvImageTemp);
        cvAddWeighted( cvImageTemp, f, cvImage, 1.0f-f,0, cvImage );
        flagImageChanged();
    } else {
        ofLog(OF_LOG_ERROR, "in addWeighted, ROI mismatch");
    }
}
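A minimal sketch of the typical use of addWeighted, a slowly adapting background held in an ofxCvFloatImage (floatBg, grayImage and learnRate are hypothetical names; learnRate lies between 0 and 1):

    // floatBg becomes learnRate * grayImage + (1 - learnRate) * floatBg,
    // so the stored background slowly drifts toward the current frame
    float learnRate = 0.01f;
    floatBg.addWeighted(grayImage, learnRate);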
Example No. 11
//--------------------------------------------------------------------------------
void ofxCvColorImage::setFromGrayscalePlanarImages( ofxCvGrayscaleImage& red, ofxCvGrayscaleImage& green, ofxCvGrayscaleImage& blue){
	if( !bAllocated ){
		ofLogNotice("ofxCvColorImage") << "setFromGrayscalePlanarImages(): allocating to match dimensions";		
		allocate(red.getWidth(), red.getHeight());
	}

	// fetch the ROIs after allocating, so this image's ROI matches the planes
	ofRectangle roi = getROI();
    ofRectangle redRoi = red.getROI();
    ofRectangle greenRoi = green.getROI();
    ofRectangle blueRoi = blue.getROI();
	
    if( redRoi.width == roi.width && redRoi.height == roi.height &&
        greenRoi.width == roi.width && greenRoi.height == roi.height &&
        blueRoi.width == roi.width && blueRoi.height == roi.height )
    {
         cvCvtPlaneToPix(red.getCvImage(), green.getCvImage(), blue.getCvImage(),NULL, cvImage);
         flagImageChanged();
	} else {
        ofLogError("ofxCvColorImage") << "setFromGrayscalePlanarImages(): image size or region of interest mismatch";
	}
}
Example No. 12
//--------------------------------------------------------------------------------
void ofxCvShortImage::addWeighted( ofxCvGrayscaleImage& mom, float f ) {
	if( !bAllocated ){
		ofLog(OF_LOG_ERROR, "in addWeighted, image is not allocated");		
		return;	
	}
	
	if( matchingROI(getROI(), mom.getROI()) ) {
        convertGrayToShort(mom.getCvImage(), cvImageTemp);
        cvAddWeighted( cvImageTemp, f, cvImage, 1.0f-f,0, cvImage );
        flagImageChanged();
    } else {
        ofLog(OF_LOG_ERROR, "in addWeighted, ROI mismatch");
    }
}
Example No. 13
//--------------------------------------------------------------------------------
void ofxCvGrayscaleImage::absDiff( ofxCvGrayscaleImage& mom ){
	if( !mom.bAllocated ){
		ofLog(OF_LOG_ERROR, "in absDiff, mom needs to be allocated");	
		return;	
	}
	if( !bAllocated ){
		ofLog(OF_LOG_NOTICE, "in absDiff, allocating to match dimensions");			
		allocate(mom.getWidth(), mom.getHeight());
	}	

    if( matchingROI(getROI(), mom.getROI()) ) {
        cvAbsDiff( cvImage, mom.getCvImage(), cvImageTemp );
        swapTemp();
        flagImageChanged();
    } else {
        ofLog(OF_LOG_ERROR, "in *=, ROI mismatch");
    }
}
Example No. 14
//--------------------------------------------------------------------------------
void ofxCvGrayscaleImage::absDiff( ofxCvGrayscaleImage& mom ){
	if( !mom.bAllocated ){
		ofLogError("ofxCvGrayscaleImage") << "absDiff(): source image not allocated";
		return;	
	}
	if( !bAllocated ){
		ofLogNotice("ofxCvGrayscaleImage") << "absDiff(): allocating to match dimensions: "
			<< mom.getWidth() << " " << mom.getHeight();
		allocate(mom.getWidth(), mom.getHeight());
	}	

    if( matchingROI(getROI(), mom.getROI()) ) {
        cvAbsDiff( cvImage, mom.getCvImage(), cvImageTemp );
        swapTemp();
        flagImageChanged();
    } else {
        ofLogError("ofxCvGrayscaleImage") << "absDiff(): region of interest mismatch";
    }
}
Example No. 15
//--------------------------------------------------------------------------------
int ofxCvContourFinder::findContours( ofxCvGrayscaleImage&  input,
									  int minArea,
									  int maxArea,
									  int nConsidered,
									  bool bFindHoles,
                                      bool bUseApproximation) {

    // get width/height disregarding ROI
    IplImage* ipltemp = input.getCvImage();
    _width = ipltemp->width;
    _height = ipltemp->height;

	reset();

	// opencv will clobber the image it detects contours on, so we want to
    // copy it into a temporary image before we detect contours.  That copy is
    // allocated if necessary (necessary = (a) not allocated or (b) wrong size),
	// so be careful if you pass different sized images to "findContours":
	// there is a performance penalty, but we think there is no memory leak
    // to worry about.  It is better to create multiple contour finders for
    // different sizes, i.e. if you are finding contours in a 640x480 image but
    // also a 320x240 image, make two ofxCvContourFinder objects rather than
    // reusing one, because you will get penalized less.

	if( inputCopy.getWidth() == 0 ) {
		inputCopy.setUseTexture(false);
		inputCopy.allocate( _width, _height );
	} else if( inputCopy.getWidth() != _width || inputCopy.getHeight() != _height ) {
        // reallocate to new size
        inputCopy.clear();
		inputCopy.setUseTexture(false);		
        inputCopy.allocate( _width, _height );
	}

    inputCopy.setROI( input.getROI() );
    inputCopy = input;

	CvSeq* contour_list = NULL;
	contour_storage = cvCreateMemStorage( 1000 );
	storage	= cvCreateMemStorage( 1000 );

	CvContourRetrievalMode  retrieve_mode
        = (bFindHoles) ? CV_RETR_LIST : CV_RETR_EXTERNAL;
	cvFindContours( inputCopy.getCvImage(), contour_storage, &contour_list,
                    sizeof(CvContour), retrieve_mode, bUseApproximation ? CV_CHAIN_APPROX_SIMPLE : CV_CHAIN_APPROX_NONE );
	CvSeq* contour_ptr = contour_list;

	// put the contours from the linked list, into an array for sorting
	while( (contour_ptr != NULL) ) {
		float area = fabs( cvContourArea(contour_ptr, CV_WHOLE_SEQ) );
		if( (area > minArea) && (area < maxArea) ) {
            cvSeqBlobs.push_back(contour_ptr);
		}
		contour_ptr = contour_ptr->h_next;
	}


	// sort the pointers based on size
	if( cvSeqBlobs.size() > 1 ) {
        sort( cvSeqBlobs.begin(), cvSeqBlobs.end(), sort_carea_compare );
	}


	// now we have cvSeqBlobs.size() contours, sorted by size, in the array
    // cvSeqBlobs; let's get the data out and into our own structures
	for( int i = 0; i < MIN(nConsidered, (int)cvSeqBlobs.size()); i++ ) {
		blobs.push_back( ofxCvBlob() );
		float area = cvContourArea( cvSeqBlobs[i], CV_WHOLE_SEQ );
		CvRect rect	= cvBoundingRect( cvSeqBlobs[i], 0 );
		cvMoments( cvSeqBlobs[i], myMoments );

		blobs[i].area                     = fabs(area);
		blobs[i].hole                     = area < 0 ? true : false;
		blobs[i].length 			      = cvArcLength(cvSeqBlobs[i]);
		blobs[i].boundingRect.x           = rect.x;
		blobs[i].boundingRect.y           = rect.y;
		blobs[i].boundingRect.width       = rect.width;
		blobs[i].boundingRect.height      = rect.height;
		blobs[i].centroid.x 			  = (myMoments->m10 / myMoments->m00);
		blobs[i].centroid.y 			  = (myMoments->m01 / myMoments->m00);

		// get the points for the blob:
		CvPoint           pt;
		CvSeqReader       reader;
		cvStartReadSeq( cvSeqBlobs[i], &reader, 0 );

    	for( int j=0; j < cvSeqBlobs[i]->total; j++ ) {
			CV_READ_SEQ_ELEM( pt, reader );
            blobs[i].pts.push_back( ofPoint((float)pt.x, (float)pt.y) );
		}
		blobs[i].nPts = blobs[i].pts.size();

	}

    nBlobs = blobs.size();

	// Free the storage memory.
	// Warning: do this inside this function otherwise a strange memory leak
	if( contour_storage != NULL ) { cvReleaseMemStorage(&contour_storage); }
	if( storage != NULL ) { cvReleaseMemStorage(&storage); }

	return nBlobs;

}
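A minimal call sketch for findContours, following the advice in the comments above about keeping one contour finder per image size (grayDiff is a hypothetical thresholded grayscale image; the thresholds are illustrative):

    ofxCvContourFinder contourFinder;
    // up to 10 blobs between 20 px and a third of the image area,
    // no holes, using the simplified (approximated) contour points
    contourFinder.findContours(grayDiff, 20, (320 * 240) / 3, 10, false, true);
    for(int i = 0; i < contourFinder.nBlobs; i++){
        contourFinder.blobs[i].draw(0, 0);
    }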
Example No. 16
//--------------------------------------------------------------------------------
int ofxCvMtemplate::findMTemplate( ofxCvGrayscaleImage&  input,
                                      ofxCvGrayscaleImage&  OF_imgTemplate) {
    //Calculate new result dimensions
	res_width  = input.getWidth()  - OF_imgTemplate.getWidth()  + 1;
	res_height = input.getHeight() - OF_imgTemplate.getHeight() + 1;

    /// allocate the res image
    IplImage	*res;
	/* create new image for template matching computation */
	res = cvCreateImage( cvSize( res_width, res_height ), IPL_DEPTH_32F, 1 );


    //////////////////////////////////
    // This could be optimized further to use the other matching methods.
    // For testing, only CV_TM_CCORR_NORMED is used below and the matching
    // results are printed to the console.

    /* choose template matching method to be used */
	//cvMatchTemplate( inputCopy.getCvImage(), templateCopy.getCvImage(), resMtemplate.getCvImage(), CV_TM_SQDIFF );
	/*cvMatchTemplate( img, tpl, res, CV_TM_SQDIFF_NORMED );
	cvMatchTemplate( img, tpl, res, CV_TM_CCORR );
	cvMatchTemplate( img, tpl, res, CV_TM_CCORR_NORMED );
	cvMatchTemplate( img, tpl, res, CV_TM_CCOEFF );
	cvMatchTemplate( img, tpl, res, CV_TM_CCOEFF_NORMED );*/

    bool MTverbose = true;
    if(MTverbose)
    {
        printf("OF_imgTemplate ROI height = %f\n", OF_imgTemplate.getROI().height);
        printf("OF_imgTemplate ROI width = %f\n", OF_imgTemplate.getROI().width);
        //input.getROI()
        printf("input.getROI()  height = %f\n", input.getROI().height);
        printf("input.getROI()  width = %f\n", input.getROI().width);
    }

    if(OF_imgTemplate.getROI().height == input.getROI().height && OF_imgTemplate.getROI().width == input.getROI().width)
    {

        cvMatchTemplate( input.getCvImage(), OF_imgTemplate.getCvImage(), res, CV_TM_CCORR_NORMED ); //CV_TM_CCORR_NORMED

        if(MTverbose)
        {
            printf("cvMatchTemplate returned\n");

            printf("cnumber of channels of iplimage: %d\n", res->nChannels);
            printf("depth of iplimage: %d\n", res->depth);

            printf("cvMinMaxLoc\n");
        }

        cvMinMaxLoc( res, &minval, &maxval, &minloc, &maxloc, 0 );

        if(MTverbose)
        {
            printf("minval=[%e]\n",minval);
            printf("maxval=[%e]\n",maxval);

            printf("minlocX=[%i]\n",minloc.x);
            printf("minlocY=[%i]\n",minloc.y);

            printf("maxlocX=[%i]\n",maxloc.x);
            printf("maxlocY=[%i]\n",maxloc.y);
        }

    }

    ///FREE OPENCV MEMORY
    cvReleaseImage( &res );

    // the function is declared int, so return a value
    // (no meaningful result is produced here)
    return 0;
}