Ejemplo n.º 1
0
// Morphological constrained hit-or-miss transform, for binary and grayscale
// images (P107, eq. 5.5). The result holds, per pixel, the residue of
// whichever fitting condition is met (foreground fit or background fit);
// pixels meeting neither condition stay 0.
//   src  - input 8-bit single-channel image
//   dst  - output image, same size as src
//   sefg - foreground structuring element (must not be NULL)
//   sebg - background structuring element; when NULL, the complement of sefg
//          is created locally
void lhMorpHMTC(const IplImage* src, IplImage* dst, IplConvKernel* sefg, IplConvKernel* sebg =NULL)
{
	assert(src != NULL && dst != NULL && src != dst && sefg!= NULL && sefg!=sebg);

	// Track ownership: release sebg at the end only if it was created here.
	// The original code unconditionally released a caller-supplied kernel.
	bool sebgOwned = false;
	if (sebg == NULL)
	{
		sebg = lhStructuringElementNot(sefg);
		sebgOwned = true;
	}

	IplImage*  temp1 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  temp2 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  temp3 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  temp4 = cvCreateImage(cvGetSize(src), 8, 1);

	IplImage*  mask1 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  mask2 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  mask3 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  mask4 = cvCreateImage(cvGetSize(src), 8, 1);

	cvZero(mask1);
	cvZero(mask2);
	cvZero(mask3);
	cvZero(mask4);

	cvZero(dst);

	//P107 (5.5)
	cvErode( src, temp1, sebg);   // erosion by background SE
	cvDilate( src, temp2, sebg);  // dilation by background SE
	cvErode( src, temp3, sefg);   // erosion by foreground SE
	cvDilate( src, temp4, sefg);  // dilation by foreground SE

	// Foreground condition: erosion by sefg equals src AND dilation by sebg
	// lies strictly below src.
	cvCmp(src, temp3, mask1, CV_CMP_EQ);
	cvCmp(temp2, src,  mask2, CV_CMP_LT);
	cvAnd(mask1, mask2, mask2);

	// Background condition: dilation by sefg equals src AND erosion by sebg
	// lies strictly above src.
	cvCmp(src, temp4, mask3 , CV_CMP_EQ);
	cvCmp(temp1, src, mask4 , CV_CMP_GT);
	cvAnd(mask3, mask4, mask4);

	// Write the corresponding residues under each mask.
	cvSub(src, temp2, dst, mask2);
	cvSub(temp1, src, dst, mask4);

	cvReleaseImage(&mask1);
	cvReleaseImage(&mask2);
	cvReleaseImage(&mask3);
	cvReleaseImage(&mask4);

	cvReleaseImage(&temp1);
	cvReleaseImage(&temp2);
	cvReleaseImage(&temp3);
	cvReleaseImage(&temp4);

	// Only release a locally-created structuring element.
	if (sebgOwned)
		cvReleaseStructuringElement(&sebg);
}
// Processing step: grabs the current color frame, maintains a running
// background model (mBackgroundImage), and replaces the pipeline image with
// the background-subtracted result.
void THISCLASS::OnStep() {
	// Get and check input image
	IplImage *inputimage = mCore->mDataStructureImageColor.mImage;
	if (! inputimage) {
		AddError(wxT("No input image."));
		return;
	}
	if (inputimage->nChannels != 3) {
		AddError(wxT("The input image is not a color image."));
		return;
	}

	// Check and update the background
	// mOutputImage holds a working copy of the current frame.
	if (! mOutputImage) {
	  mOutputImage = cvCloneImage(inputimage);
	} else {
	  cvCopyImage(inputimage, mOutputImage);
	}
	if (! mBackgroundImage) {
		// First frame seen: bootstrap the background model from it.
		mBackgroundImage = cvCloneImage(mOutputImage);
	} else if (mUpdateProportion > 0) {
		if ((cvGetSize(mOutputImage).height != cvGetSize(mBackgroundImage).height) || (cvGetSize(mOutputImage).width != cvGetSize(mBackgroundImage).width)) {
			AddError(wxT("Input and background images do not have the same size."));
			return;
		}

		// Exponential moving average: background = p*frame + (1-p)*background.
		cvAddWeighted(mOutputImage, mUpdateProportion, mBackgroundImage, 1.0 - mUpdateProportion, 0, mBackgroundImage);
	}

	try {
		// Correct the tmpImage with the difference in image mean
		// (per-channel brightness normalization toward the background mean).
		if (mCorrectMean) {
			mBackgroundImageMean = cvAvg(mBackgroundImage);
			CvScalar tmpScalar = cvAvg(mOutputImage);
			cvAddS(mOutputImage, cvScalar(mBackgroundImageMean.val[0] - tmpScalar.val[0], mBackgroundImageMean.val[1] - tmpScalar.val[1], mBackgroundImageMean.val[2] - tmpScalar.val[2]), mOutputImage);
		}

		// Background subtraction; mMode selects operand order or absolute difference.
		if (mMode == sMode_SubImageBackground) {
			cvSub(mOutputImage, mBackgroundImage, mOutputImage);
		} else if (mMode == sMode_SubBackgroundImage) {
			cvSub(mBackgroundImage, mOutputImage, mOutputImage);
		} else {
			cvAbsDiff(mOutputImage, mBackgroundImage, mOutputImage);
		}
	} catch (...) {
		AddError(wxT("Background subtraction failed."));
	}
	// Publish the subtracted frame back into the pipeline.
	mCore->mDataStructureImageColor.mImage = mOutputImage;
	// Set the display
	DisplayEditor de(&mDisplayOutput);
	if (de.IsActive()) {
		de.SetMainImage(mOutputImage);
	}
}
Ejemplo n.º 3
0
/**
 * \brief	Takes frame and applies image processing techniques to filter out non-laser line points. Updates images used for runtime display.
 *
 * Operates on file-level image buffers (frame, frameHSV, hue, saturation, ...)
 * and publishes intermediate results into args[] for display.
 * \return 0 on success.
 */
int filterFrame() {
	args[0] = frame;
	cvCvtColor(frame, frameHSV, CV_BGR2HSV);	//convert RGB values of frame to HSV and place in frameHSV
	cvSplit(frameHSV, hue, saturation, value, NULL);	//split frameHSV into constituent components and place appropriately; we are done with frameHSV
	args[1] = hue;
	args[2] = value;
	cvCopy(saturation, saturation2);	//make an additional copy of saturation for display
	//args[8] = saturation2;
	//cvShowImage("saturation", saturation2);
	cvSmooth(frame, frameHSV, CV_BLUR, 20, 20 );   //smooth frame and store in frameHSV
	//cvShowImage("Smoothed frame", frameHSV);
	cvSplit(frame, blue, green, red, NULL);	//split frame into its RGB components
	cvSplit(frameHSV, blue2, green2, red2, NULL);	//split the smoothed version into its RGB components
	cvMin(blue, green, min_bg);	//take the min of blue and green and store in min_bg
	args[3] = min_bg;
	//cvShowImage("minimum of blue and green", min_bg);
	cvSub(red, min_bg, red_last);	//take red less the min of the blue and green
	//cvShowImage("red_last = red - min_bg", red_last);
	cvThreshold(red_last, red_last, thresholdValue, 255, CV_THRESH_BINARY_INV);	//threshold the red_last
	//cvShowImage("threshold of red_last", red_last);
	args[4] = red_last;
	cvSub(red, red2, deltaRed);
	//cvShowImage("deltaRed = Original red - smooth red", deltaRed);
	cvThreshold(deltaRed, deltaRed, thresholdValue, 255, CV_THRESH_BINARY);
	//cvShowImage("threshold(deltaRed)", deltaRed);
	cvCopy(deltaRed, alpha);
	cvInRangeS(saturation, cvScalar(0), cvScalar(25), saturation);
	//cvShowImage("Low saturation in original frame", saturation);
	cvInRangeS(hue, cvScalar(49), cvScalar(125), beta);
	//cvShowImage("Mixed hue in original frame", beta);
	cvOr(beta, saturation, beta);
	//cvShowImage("beta = Low saturation OR mixed hue", beta);
	cvOr(beta, red_last, beta);
	//cvShowImage("beta = beta OR red_last", beta);
	//args[5] = alpha;
	args[5] = beta;

	IplConvKernel* mask = cvCreateStructuringElementEx(5, 5, 2, 2, 2, NULL );

	cvDilate(saturation2,dialated, mask, 20);
	//cvShowImage("dilate original saturation", dialated);
	args[6] = dialated;
	cvThreshold(dialated, dialated, 100, 255, CV_THRESH_BINARY);
	cvErode(dialated,eroded, mask, 30);

	args[7] = eroded;
	cvSub(alpha, beta, orig_filter);
	args[8] = orig_filter;
	cvAnd(orig_filter, eroded, zeta);
	args[9] = zeta;

	// Release the structuring element: the original leaked one kernel per frame.
	cvReleaseStructuringElement(&mask);
	return 0;
}
Ejemplo n.º 4
0
// Advanced morphological operations (open/close/gradient/top-hat/black-hat)
// built from erode/dilate. `temp` is a scratch image: it is mandatory for
// CV_MOP_GRADIENT, and for the hat operations when src == dst; otherwise dst
// itself is reused as the scratch buffer.
CV_IMPL void
cvMorphologyEx( const void* src, void* dst,
                void* temp, IplConvKernel* element, int op, int iterations )
{
    // Name used by the CV_ERROR reporting macros (fixed typo: was "cvMorhologyEx").
    CV_FUNCNAME( "cvMorphologyEx" );

    __BEGIN__;

    if( (op == CV_MOP_GRADIENT ||
        ((op == CV_MOP_TOPHAT || op == CV_MOP_BLACKHAT) && src == dst)) && temp == 0 )
        CV_ERROR( CV_HeaderIsNull, "temp image required" );

    if( temp == src || temp == dst )
        CV_ERROR( CV_HeaderIsNull, "temp image is equal to src or dst" );

    switch (op)
    {
    case CV_MOP_OPEN:       // erosion followed by dilation
        CV_CALL( cvErode( src, dst, element, iterations ));
        CV_CALL( cvDilate( dst, dst, element, iterations ));
        break;
    case CV_MOP_CLOSE:      // dilation followed by erosion
        CV_CALL( cvDilate( src, dst, element, iterations ));
        CV_CALL( cvErode( dst, dst, element, iterations ));
        break;
    case CV_MOP_GRADIENT:   // dilate(src) - erode(src)
        CV_CALL( cvErode( src, temp, element, iterations ));
        CV_CALL( cvDilate( src, dst, element, iterations ));
        CV_CALL( cvSub( dst, temp, dst ));
        break;
    case CV_MOP_TOPHAT:     // src - open(src)
        if( src != dst )
            temp = dst;     // dst can double as the scratch buffer
        CV_CALL( cvErode( src, temp, element, iterations ));
        CV_CALL( cvDilate( temp, temp, element, iterations ));
        CV_CALL( cvSub( src, temp, dst ));
        break;
    case CV_MOP_BLACKHAT:   // close(src) - src
        if( src != dst )
            temp = dst;     // dst can double as the scratch buffer
        CV_CALL( cvDilate( src, temp, element, iterations ));
        CV_CALL( cvErode( temp, temp, element, iterations ));
        CV_CALL( cvSub( temp, src, dst ));
        break;
    default:
        CV_ERROR( CV_StsBadArg, "unknown morphological operation" );
    }

    __END__;
}
Ejemplo n.º 5
0
// Get match area of two same-size images, returned as a pixel count.
// imgto is overwritten: first with the difference (imgfrom - imgto), then with
// its binary threshold. Returns the black (matching) area, i.e. the number of
// pixels where the saturated difference is <= 1.
double MatchAreaCount(IplImage* imgfrom, IplImage* imgto) {
	cvSub(imgfrom, imgto, imgto, 0); 			// subtract the two images
	cvThreshold(imgto, imgto, 1, 255, CV_THRESH_BINARY); 	// binarize the difference
	int white = 0, black, total;
	for (int y = 0; y < imgto->height; y++) {
		for (int x = 0; x < imgto->width; x++) {
			// imageData is signed char: cast so 255 does not read as -1.
			// (The original tested `val & 1`, which only worked because
			// 255 sign-extends to -1, an odd value.)
			const unsigned char val = (unsigned char) imgto->imageData[y * imgto->widthStep
					+ x];
			if (val != 0) {
				++white; 			// mismatching (white) pixel
			}
		}
	}
	total = imgto->width * imgto->height; 			// total area
	black = total - white; 					// matching (black) area

	if (globalArgs.verbosity) {
		printf("white = %d  black = %d  total = %d\n", white, black,
				total);
		cvNamedWindow("Test", CV_WINDOW_AUTOSIZE);
		cvShowImage("Test", imgto);
		cvWaitKey(0);
		cvDestroyWindow("Test");
	}
	return black;
}
Ejemplo n.º 6
0
// Computes the average intensities used for threshold selection:
//  - pupilAvg: mean of the pupil (mask) region, computed twice — the second
//    pass excludes pixels more than 30 levels above the first average, to
//    suppress the corneal glint;
//  - whiteAvg / whiteMin / whiteMax (+ locations): statistics of the region
//    outside the mask, i.e. the white of the eye.
// targetRect is given in full-resolution coordinates; `divisor` scales it to
// the small image. ROIs are set for the computation and reset before return.
void thresholdCalculator::calculateAverages(ofxCvGrayscaleAdvanced & smallCurrentImg, ofxCvGrayscaleAdvanced & maskImg, ofRectangle & targetRect) {
	
	roi.x = targetRect.x / divisor;
	roi.y = targetRect.y / divisor;
	
	maskImg.setROI(roi);
	smallCurrentImg.setROI(roi);
	
	// First-pass pupil average; notDiffImg becomes the inverted mask
	// (everything outside the pupil).
	CvScalar tempPupilAvg = cvAvg(smallCurrentImg.getCvImage(), maskImg.getCvImage());
	cvNot(maskImg.getCvImage(), notDiffImg.getCvImage());
	pupilAvg = tempPupilAvg.val[0];
	
	// get average of pupil black iteratively(get average twice) to remove the influence of glint
	cvThreshold(smallCurrentImg.getCvImage(), farFromAvg, pupilAvg + 30, 255, CV_THRESH_BINARY);		// 30 is the distance from average.
	cvSub(maskImg.getCvImage(), farFromAvg, newMask);								// make a mask to get rid of those far points.
	CvScalar newPupilAvg = cvAvg(smallCurrentImg.getCvImage(), newMask);			// get new average value.
	
	// get average, min and max value of white area of an eye.
	CvScalar tempWhiteAvg = cvAvg(smallCurrentImg.getCvImage(), notDiffImg.getCvImage());
	for (int i = 0; i < 6; i++) notDiffImg.erode();				// this might be very useful to reduce the influence of small noise & glint
	cvMinMaxLoc(smallCurrentImg.getCvImage(), &whiteMin, &whiteMax, &whiteLocMin, &whiteLocMax, notDiffImg.getCvImage());

	maskImg.resetROI();
	smallCurrentImg.resetROI();
	
	pupilAvg = newPupilAvg.val[0];					// value is in the first element of CvScalar
	whiteAvg = tempWhiteAvg.val[0];
	
}
Ejemplo n.º 7
0
// Renders the current frame and a temporal-difference view.
// Shows ctx->image in the "output" window; in "thresholded" shows the
// difference between the current frame and a running average of previous
// frames (kept in the function-local static `oldimage`). On the first call
// there is no history yet, so ctx->thr_image is shown and the average seeded.
void display(struct ctx *ctx)
{
    int i;  // only used by the commented-out overlay code below
    static IplImage *oldimage = NULL;

    /*if (ctx->num_fingers == NUM_FINGERS)
    {

#if defined(SHOW_HAND_CONTOUR)
        cvDrawContours(ctx->image, ctx->contour,
                       CV_RGB(0,0,255), CV_RGB(0,255,0),
                       0, 1, CV_AA, cvPoint(0,0));
#endif


        cvCircle(ctx->image, ctx->hand_center, 5, CV_RGB(255, 255, 0),
                 1, CV_AA, 0);
        cvCircle(ctx->image, ctx->hand_center, ctx->hand_radius,
                 CV_RGB(255, 0, 0), 1, CV_AA, 0);

        for (i = 0; i < ctx->num_fingers; i++)
        {

            cvCircle(ctx->image, ctx->fingers[i], 10,
                     CV_RGB(0, 255, 0), 3, CV_AA, 0);

            cvLine(ctx->image, ctx->hand_center, ctx->fingers[i],
                   CV_RGB(255,255,0), 1, CV_AA, 0);
        }

        for (i = 0; i < ctx->num_defects; i++)
        {
            cvCircle(ctx->image, ctx->defects[i], 2,
                     CV_RGB(200, 200, 200), 2, CV_AA, 0);
        }
    }*/

    cvShowImage("output", ctx->image);
    IplImage *dst;
    if ( oldimage != NULL ) {
        dst = cvCloneImage(ctx->image);

        // Difference between the current frame and the running average.
        cvSub(ctx->image,oldimage,dst,NULL);

        cvShowImage("thresholded", dst);

        // Update the running average: old = 0.25*old + 0.75*current.
        cvAddWeighted(oldimage, 0.25, ctx->image, 0.75, 0.0, oldimage);


        cvReleaseImage(&dst);
        //cvReleaseImage(&oldimage);


    }
    else {
        // First frame: nothing to diff against yet.
        cvShowImage("thresholded", ctx->thr_image);
        oldimage=cvCloneImage(ctx->image);

    }
}
// Difference-of-Gaussians band-pass: dst = blur(src, sigma2) - blur(src, sigma1),
// both blurs using the configured _kernelSize.
void DifferenceOfGaussian::output(IplImage* src, IplImage* dst){
	// Declare the temporaries as IplImage* (not CvArr*) so they can be
	// released; the original leaked both scratch images on every call.
	IplImage* dog1 = cvCreateImage(cvGetSize(src), src->depth, src->nChannels);
	IplImage* dog2 = cvCreateImage(cvGetSize(src), src->depth, src->nChannels);
	cvSmooth(src, dog1, CV_GAUSSIAN, _kernelSize, _kernelSize, _sigma1, _sigma1);
	cvSmooth(src, dog2, CV_GAUSSIAN, _kernelSize, _kernelSize, _sigma2, _sigma2);
	cvSub(dog2, dog1, dst, 0);
	cvReleaseImage(&dog1);
	cvReleaseImage(&dog2);
}
Ejemplo n.º 9
0
// High-pass filter: subtract a Gaussian-blurred copy from the source, then
// lightly blur the residual to suppress noise. Kernel sizes are forced odd.
void moHighpassModule::applyFilter(IplImage *src) {
	int highpassKernel = this->property("size").asInteger() * 2 + 1;  // odd by construction
	int noiseKernel    = this->property("blur").asInteger() * 2 + 1;  // odd by construction
	cvSmooth(src, this->output_buffer, CV_GAUSSIAN, highpassKernel);
	cvSub(src, this->output_buffer, this->output_buffer);
	cvSmooth(this->output_buffer, this->output_buffer, CV_GAUSSIAN, noiseKernel);
}
Ejemplo n.º 10
0
/**
 * Refreshes the displayed image: grabs a camera frame, applies the selected
 * effects (grayscale, edge stroke, inversion) and shows the result.
 */
void EdgeDetector::update() {
	if (camera == NULL) return;

	cvWaitKey(33);

	cameraFrame = cvQueryFrame(camera);
	cvReleaseImage(&resultFrame);
	
	if (isGrayScaleEffect) {
		IplImage* tempFrame = cvCloneImage(cameraFrame);
		// cvCreateImage's third argument is the channel count; the original
		// passed CV_LOAD_IMAGE_GRAYSCALE (0), which is not a valid channel
		// count — a grayscale image has exactly 1 channel.
		resultFrame = cvCreateImage(imageSize, cameraFrame->depth, 1);
		cvCvtColor(tempFrame, resultFrame, CV_BGR2GRAY);
		cvReleaseImage(&tempFrame);
	} else resultFrame = cvCloneImage(cameraFrame);

	if (!isOriginalEffect) {
		if (isStrokeEffect) {
			// Stroke effect: subtract the detected edges from the frame.
			IplImage* tempFrame = cvCloneImage(resultFrame);
			tempFrame = edgeDetectOperator->applyOperator(tempFrame);
			cvSub(resultFrame, tempFrame, resultFrame);
			cvReleaseImage(&tempFrame);
		} else resultFrame = edgeDetectOperator->applyOperator(resultFrame);
	}

	if (isInverseEffect) {
		IplImage* tempFrame = cvCloneImage(resultFrame);
		cvNot(tempFrame, resultFrame);
		cvReleaseImage(&tempFrame);
	}

	cvShowImage(getWindowName(), resultFrame);
}
Ejemplo n.º 11
0
// Morphological black top-hat by reconstruction:
// dst = RClose(src) - src (see p156).
void lhMorpRBTH(const IplImage* src, IplImage* dst, IplConvKernel* se = NULL, int iterations=1)
{
	assert(src != NULL  && dst != NULL && src != dst );
	//p156: close-by-reconstruction, then subtract the original image.
	lhMorpRClose(src, dst, se, iterations);
	cvSub(dst, src, dst);
}
Ejemplo n.º 12
0
// Morphological unconstrained hit-or-miss transform, for binary and grayscale
// images (P106, eq. 5.4): dst = erode(src,sefg) - dilate(src,sebg) where that
// difference is positive, and 0 elsewhere.
//   sefg - foreground structuring element (must not be NULL)
//   sebg - background structuring element; when NULL, the complement of sefg
//          is created locally
void lhMorpHMTU(const IplImage* src, IplImage* dst, IplConvKernel* sefg, IplConvKernel* sebg =NULL)
{
	assert(src != NULL && dst != NULL && src != dst && sefg!= NULL && sefg!=sebg);

	// Track ownership: release sebg at the end only if it was created here.
	// The original code unconditionally released a caller-supplied kernel.
	bool sebgOwned = false;
	if (sebg == NULL)
	{
		sebg = lhStructuringElementNot(sefg);
		sebgOwned = true;
	}
	
	IplImage*  temp = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  mask = cvCreateImage(cvGetSize(src), 8, 1);
	cvZero(mask);

	//P106 (5.4)
	cvErode( src, temp, sefg);
	cvDilate(src, dst, sebg);
	cvCmp(temp, dst, mask, CV_CMP_GT);	// mask: erosion strictly exceeds dilation

	cvSub(temp, dst, dst, mask);		// positive residues under the mask
	cvNot(mask, mask);
	cvSet(dst, cvScalar(0), mask);		// everything else is zero

	cvReleaseImage(&mask);
	cvReleaseImage(&temp);

	// Only release a locally-created structuring element.
	if (sebgOwned)
		cvReleaseStructuringElement(&sebg);
}
Ejemplo n.º 13
0
//--------------------------------------------------------------------------------
// Pixel-wise subtraction: *this -= mom. Allocates this image to match mom when
// needed; both images must agree in channel count, depth and ROI.
void ofxCvImage::operator -= ( ofxCvImage& mom ) {
	if( !mom.bAllocated ){
		ofLogError("ofxCvImage") << "operator-=: source image not allocated";
		return;
	}
	if( !bAllocated ){
		ofLogNotice("ofxCvImage") << "operator-=: allocating to match dimensions: "
			<< mom.getWidth() << " " << mom.getHeight();
		allocate(mom.getWidth(), mom.getHeight());
	}

	// Guard clauses replace the original nested conditionals.
	bool sameType = ( mom.getCvImage()->nChannels == cvImage->nChannels )
	             && ( mom.getCvImage()->depth == cvImage->depth );
	if( !sameType ){
		ofLogError("ofxCvImage") << "operator-=: image type mismatch";
		return;
	}
	if( !matchingROI(getROI(), mom.getROI()) ){
		ofLogError("ofxCvImage") << "operator-=: region of interest mismatch";
		return;
	}

	cvSub( cvImage, mom.getCvImage(), cvImageTemp );
	swapTemp();
	flagImageChanged();
}
/*	The function will return the connected components in 'comp', 
	as well as the number of connected components 'nc'.
	At this point, we have to determine whether the components are eye pair or not.
	We'll use experimentally derived heuristics for this, based on the width, 
	height, vertical distance, and horizontal distance of the components. 
	To make things simple, we only proceed if the number of the connected components is 2.*/
// NOTE(review): relies on file-level globals — `diff` (scratch image),
// `kernel` (morphology structuring element) and `storage` (contour memory
// storage); presumably allocated during initialization — confirm.
int get_connected_components(IplImage* img, IplImage* prev, CvRect window, CvSeq** comp)
{
		IplImage* _diff;
 
		cvZero(diff);
 
    /* apply search window to images */
		cvSetImageROI(img, window);
		cvSetImageROI(prev, window);
		cvSetImageROI(diff, window);
 
    /* motion analysis: frame difference, binarize, then open to drop specks */
		cvSub(img, prev, diff, NULL);
		cvThreshold(diff, diff, 5, 255, CV_THRESH_BINARY);
		cvMorphologyEx(diff, diff, NULL, kernel, CV_MOP_OPEN, 1);
 
    /* reset search window */
		cvResetImageROI(img);
		cvResetImageROI(prev);
		cvResetImageROI(diff);
 
		/* work on a copy: cvFindContours modifies its input image */
		_diff = (IplImage*)cvClone(diff);
 
    /* get connected components */
		int nc = cvFindContours(_diff, storage, comp, sizeof(CvContour),
                            CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));
 
		cvClearMemStorage(storage);		
		cvReleaseImage(&_diff);
	
		return nc;
}
Ejemplo n.º 15
0
// Morphological black top-hat: dst = close(src) - src.
void lhMorpBlackTopHat(const IplImage* src, IplImage* dst, IplConvKernel* se=NULL, int iterations=1)
{
	assert(src != NULL && dst != NULL && src != dst);
	lhMorpClose(src, dst, se, iterations );
    cvSub(dst, src, dst );

}
// Processing step: accumulates the current color frame into mFinalImage
// according to the configured mode (addition / subtraction).
void THISCLASS::OnStep() {
	// Get and check input image
	IplImage *inputimage = mCore->mDataStructureImageColor.mImage;
	if (! inputimage) {
		AddError(wxT("No input image."));
		return;
	}
	if (inputimage->nChannels != 3) {
		AddError(wxT("The input image is not a color image."));
		return;
	}

	// Check and update the background
	if (! mFinalImage) {
		// First frame: start the accumulator from it.
		mFinalImage = cvCloneImage(inputimage);
	} else if (mMode == sMode_Addition) {
		cvAdd(mFinalImage, inputimage, mFinalImage);
	} else if (mMode == sMode_Subtraction) {
		cvSub(mFinalImage, inputimage, mFinalImage);
	} else if (mMode == sMode_Multiplication) {
		// NOTE(review): multiplication mode is not implemented — the frame is
		// silently ignored here; confirm whether this is intentional.
	}

	// Set the display
	DisplayEditor de(&mDisplayOutput);
	if (de.IsActive()) {
		de.SetMainImage(mFinalImage);
	}
}
Ejemplo n.º 17
0
// Backpropagates through a softmax layer:
//   dE_dY_afder = Y .* dE_dY - Y .* rowsum(Y .* dE_dY),  where Y = softmax(X).
// dE_dY may be supplied transposed (nc x nr); it is re-oriented to match X.
void cvSoftmaxDer(CvMat * X, CvMat * dE_dY, CvMat * dE_dY_afder) {
  CV_FUNCNAME("cvSoftmaxDer");
  __BEGIN__;
  const int nr = X->rows, nc = X->cols, dtype = CV_MAT_TYPE(X->type);
  CvMat * Y = cvCreateMat(nr, nc, dtype);
  CvMat * dE_dY_transpose = cvCreateMat(nr, nc, dtype);
  CvMat * sum = cvCreateMat(nr, 1, dtype);
  CvMat * sum_repeat = cvCreateMat(nr, nc, dtype);
  cvSoftmax(X, Y);
  // Accept a transposed gradient and normalize its orientation first.
  if (dE_dY->rows==nc && dE_dY->cols==nr){
    cvTranspose(dE_dY,dE_dY_transpose);
    cvMul(Y,dE_dY_transpose,dE_dY_afder);
  }else{
    cvMul(Y,dE_dY,dE_dY_afder);
  }
  cvReduce(dE_dY_afder,sum,-1,CV_REDUCE_SUM);  // per-row sums of Y .* dE_dY (output is nr x 1)
  cvRepeat(sum,sum_repeat);                    // broadcast the sums back to nr x nc
  cvMul(Y,sum_repeat,sum_repeat);
  cvSub(dE_dY_afder,sum_repeat,dE_dY_afder);
  cvReleaseMat(&dE_dY_transpose);
  cvReleaseMat(&sum);
  cvReleaseMat(&sum_repeat);
  cvReleaseMat(&Y);
  __END__;
}
Ejemplo n.º 18
0
// Plays "trafficDay.avi", showing each (resized) frame alongside the
// frame-to-frame difference, until EOF or the Esc key.
int main( int argc, char** argv ) {

    // CAPTURE VIDEO
    CvCapture* capture;
    capture = cvCreateFileCapture("trafficDay.avi");
    if( capture == NULL ) {
        fprintf (stderr, "cannot open file\n");
        exit(1);
    }


    // ALLOCATE SPACE FOR IplImage STRUCTURES.
    // DEFAULT ONE FROM VIDEO.
    IplImage *big_frame = cvQueryFrame(capture);
    // Guard against an empty/unreadable video: big_frame is dereferenced below
    // (the original crashed here when the first frame could not be grabbed).
    if( big_frame == NULL ) {
        fprintf (stderr, "cannot read first frame\n");
        cvReleaseCapture(&capture);
        exit(1);
    }
    IplImage *frame = cvCreateImage (cvSize(DEFAULT_SIZE(0), DEFAULT_SIZE(1)), big_frame->depth, big_frame->nChannels);
    IplImage *last_frame = cvCreateImage (cvSize(DEFAULT_SIZE(0), DEFAULT_SIZE(1)), big_frame->depth, big_frame->nChannels);
    IplImage *diff = cvCreateImage (cvSize(DEFAULT_SIZE(0), DEFAULT_SIZE(1)), big_frame->depth, big_frame->nChannels);
    cvResize (big_frame, last_frame, CV_INTER_LINEAR );


    // CREATE WINDOWS AND LOCATE THEM TO TOP LEFT OF THE SCREEN
    cvNamedWindow( "Video", CV_WINDOW_AUTOSIZE);
    cvNamedWindow( "Difference", CV_WINDOW_AUTOSIZE);
    cvMoveWindow ("Video", 100, 0);
    cvMoveWindow ("Difference", 100, 500);


    // MAIN LOOP
    while((big_frame = cvQueryFrame (capture)) != NULL) {
        // RESIZE IMAGE FROM VIDEO
        cvResize (big_frame, frame, CV_INTER_LINEAR );
        // SHOW CAPTURED AND RESIZED IMAGE
        cvShowImage("Video", frame);

        // COMPUTE AND SHOW DIFFERENCE
        cvSub (frame, last_frame, diff, NULL);
        cvShowImage("Difference", diff);

        // WAIT FOR BREAK (Esc) AND TO ADJUST FPS
        char c = cvWaitKey(50);
        if(c == 27) break;

        // UPDATE LAST FRAME
        cvCopy (frame, last_frame, NULL);

    }

    //RELEASE IMAGE STRUCTURES
    cvReleaseImage (&last_frame);
    cvReleaseImage (&frame);
    cvReleaseImage (&diff);

    // RELEASE CAPTURE
    cvReleaseCapture(&capture);
    // DESTROY WINDOWS
    cvDestroyWindow("Video");
    cvDestroyWindow("Difference");
    return 0;
}
Ejemplo n.º 19
0
//--------------------------------------------------------------------------------
// Pixel-wise subtraction: *this -= mom. Both images must share the same size.
void ofxCvFloatImage::operator -= ( ofxCvFloatImage& mom ) {
	bool sameSize = ( mom.width == width ) && ( mom.height == height );
	if( !sameSize ) {
        cout << "error in -=, images are different sizes" << endl;
		return;
	}
	cvSub( cvImage, mom.getCvImage(), cvImageTemp );
	swapTemp();
}
Ejemplo n.º 20
0
// Morphological thinning (fit part): dst = src - HMTOpen(src, sefg).
void lhMorpThinFit(const IplImage* src, IplImage* dst, IplConvKernel* sefg, IplConvKernel* sebg =NULL, int type=LH_MORP_TYPE_BINARY)
{

	assert(src != NULL && dst != NULL && src != dst && sefg!= NULL && sefg!=sebg);

	// NOTE(review): the sebg parameter is accepted but not forwarded —
	// lhMorpHMTOpen is called with a NULL background SE; confirm intentional.
	lhMorpHMTOpen(src, dst, sefg, NULL, type);
	cvSub(src, dst, dst);
}
// Processing step: grayscale background subtraction against a preloaded,
// fixed background image (mBackgroundImage), modifying the input in place.
void THISCLASS::OnStep() {
	// Get and check input image
	IplImage *inputimage = mCore->mDataStructureImageGray.mImage;
	if (! inputimage) {
		AddError(wxT("No input image."));
		return;
	}
	if (inputimage->nChannels != 1) {
		AddError(wxT("The input image is not a grayscale image."));
		return;
	}

	// Check the background image
	if (! mBackgroundImage) {
		AddError(wxT("No background image loaded."));
		return;
	}
	if ((cvGetSize(inputimage).height != cvGetSize(mBackgroundImage).height) || (cvGetSize(inputimage).width != cvGetSize(mBackgroundImage).width)) {
		AddError(wxT("Input and background images don't have the same size."));
		return;
	}

	try {
		// Correct the inputimage with the difference in image mean
		// (brightness normalization toward the stored background mean).
		if (mCorrectMean) {
			cvAddS(inputimage, cvScalar(mBackgroundImageMean.val[0] - cvAvg(inputimage).val[0]), inputimage);
		}

		// Background subtraction; mMode selects operand order or absolute difference.
		if (mMode == sMode_SubImageBackground) {
			cvSub(inputimage, mBackgroundImage, inputimage);
		} else if (mMode == sMode_SubBackgroundImage) {
			cvSub(mBackgroundImage, inputimage, inputimage);
		} else {
			cvAbsDiff(inputimage, mBackgroundImage, inputimage);
		}
	} catch (...) {
		AddError(wxT("Background subtraction failed."));
	}

	// Set the display
	DisplayEditor de(&mDisplayOutput);
	if (de.IsActive()) {
		de.SetMainImage(inputimage);
	}
}
Ejemplo n.º 22
0
void moHighpassModule::applyFilter(){
	int b1 = this->property("size").asInteger()*2+1; //make sure its odd
	int b2 = this->property("blur").asInteger()*2+1; //make sure its odd
	IplImage* src = static_cast<IplImage*>(this->input->getData());
	cvSmooth(src, this->output_buffer, CV_GAUSSIAN, b1);
	cvSub(src, this->output_buffer, this->output_buffer);
	cvSmooth(this->output_buffer, this->output_buffer, CV_GAUSSIAN, b2);
}
Ejemplo n.º 23
0
//形态学基本梯度运算
void lhMorpGradient(const IplImage* src, IplImage* dst, IplConvKernel* se=NULL, int iterations=1)
{
	assert(src != NULL && dst != NULL && src != dst);
	IplImage*  temp = cvCloneImage(src);
	cvErode( src, temp, se, iterations );
    cvDilate( src, dst, se, iterations );
    cvSub( dst, temp, dst );
	cvReleaseImage(&temp);
}
Ejemplo n.º 24
0
//形态学自补顶帽运算
void lhMorpQTopHat(const IplImage* src, IplImage* dst, IplConvKernel* se=NULL, int iterations=1)
{
	assert(src != NULL && dst != NULL && src != dst);
	IplImage*  temp = cvCloneImage(src);
    lhMorpClose( src, temp, se, iterations );
    lhMorpOpen( src, dst, se, iterations );
    cvSub(temp, dst, dst );
	cvReleaseImage(&temp);
}
Ejemplo n.º 25
0
// Evaluates the 3D (space-time) Harris cornerness function on the buffered
// second-moment images: dst = det(C) - k * trace(C)^3, where C is the 3x3
// symmetric matrix [cxx cxy cxt; cxy cyy cyt; cxt cyt ctt].
// tmp1 accumulates the determinant; tmp2 is scratch for each term.
void HarrisBuffer::HarrisFunction(double k, IplImage* dst)
{
  // Harris function in 3D
  // original space-time Harris
  /*detC=  
  cxx.*cyy.*ctt +		xx yy tt
  cxy.*cyt.*cxt +		2 * xy yt xt
  cxt.*cxy.*cyt -		.
  cxx.*cyt.*cyt -		xx yt^2
  cxy.*cxy.*ctt -		tt xy^2	
  cxt.*cyy.*cxt ;		yy xt^2
  */
  // tmp1 = cxx * cyy * ctt
  cvMul(cxx, cyy, tmp1);
  cvMul(ctt, tmp1, tmp1);

  // tmp2 = 2 * cxy * cxt * cyt (the scale argument supplies the factor 2)
  cvMul(cxy, cxt, tmp2);
  cvMul(cyt, tmp2, tmp2,2);

  cvAdd(tmp1,tmp2,tmp1);

  // subtract cxx * cyt^2
  cvMul(cyt,cyt,tmp2);
  cvMul(cxx,tmp2,tmp2);

  cvSub(tmp1,tmp2,tmp1);

  // subtract ctt * cxy^2
  cvMul(cxy,cxy,tmp2);
  cvMul(ctt,tmp2,tmp2);

  cvSub(tmp1,tmp2,tmp1);

  // subtract cyy * cxt^2
  cvMul(cxt,cxt,tmp2);
  cvMul(cyy,tmp2,tmp2);

  cvSub(tmp1,tmp2,tmp1);

  //trace3C=(cxx+cyy+ctt).^3;
  cvAdd(cxx,cyy,tmp2);
  cvAdd(ctt,tmp2,tmp2);
  cvPow(tmp2,tmp2,3);

  //H=detC-stharrisbuffer.kparam*trace3C;
  cvScale(tmp2,tmp2,k,0);
  cvSub(tmp1,tmp2,dst);
}
Ejemplo n.º 26
0
// Estimates optical flow (OFx, OFy) from the buffered second-moment matrix
// entries by solving the 2x2 linear system per pixel (Cramer's rule).
// ref: Laptev et al. CVIU 2007, eq.(8)
void HarrisBuffer::OpticalFlowFromSMM()
{
  // denominator: tmp5 = cxx*cyy - cxy^2
  cvMul(cxx, cyy, tmp1);
  cvMul(cxy, cxy, tmp2);
  cvSub(tmp1,tmp2,tmp5);

  // x numerator: tmp6 = cyy*cxt - cxy*cyt
  cvMul(cyy, cxt, tmp3);
  cvMul(cxy, cyt, tmp4);
  cvSub(tmp3,tmp4,tmp6);

  cvDiv(tmp6,tmp5,OFx);

  // y numerator: tmp6 = cxx*cyt - cxy*cxt (tmp3/tmp4/tmp6 are reused)
  cvMul(cxx, cyt, tmp3);
  cvMul(cxy, cxt, tmp4);
  cvSub(tmp3,tmp4,tmp6);

  cvDiv(tmp6,tmp5,OFy);
}
Ejemplo n.º 27
0
//形态学对比度增强运算
void lhMorpEnhance(const IplImage* src, IplImage* dst, IplConvKernel* se=NULL, int iterations=1)
{
	assert(src != NULL && dst != NULL && src != dst);
	IplImage*  temp = cvCloneImage(src);
    lhMorpWhiteTopHat( src, temp, se, iterations );
    lhMorpBlackTopHat( src, dst, se, iterations );
	cvAdd(src, temp, temp);
    cvSub(temp, dst, dst );
	cvReleaseImage(&temp);
}
Ejemplo n.º 28
0
// Morphological H-convexity transform: dst = src - HMax(src, h)  (see p150).
void lhMorpHConvex(const IplImage* src, IplImage* dst, unsigned char h, IplConvKernel* se = NULL)
{
	assert(src != NULL  &&  dst != NULL && src != dst );

	//p150

	lhMorpHMax(src, dst, h, se);
	cvSub(src, dst, dst);

}
Ejemplo n.º 29
0
// Worker-thread body (stage 1.1). Arg1 is an array of void* slots:
//   Arg[0] = f (input image), Arg[1] = f1 (output), Arg[4] = f4 (scratch),
//   Arg[5] = image operand of cvAndDiff, Arg[6] = pointer to a float scalar.
// Computes f1 = (f - f4)^2 after zeroing the output and scratch images.
// NOTE(review): cvAndDiff is not a standard OpenCV function — presumably a
// project helper that fills Arg[4]; confirm its semantics before relying on
// this description.
void* multiThread1_1(void* Arg1)
{
	void** Arg = (void**) Arg1;
	cvZero( (IplImage*)Arg[1] );		
	cvZero( (IplImage*)Arg[4] );
	cvAndDiff( (IplImage*)Arg[4], cvScalar(*(float*)Arg[6]), (IplImage*)Arg[5] ); 
	cvSub( (IplImage*)Arg[0], (IplImage*)Arg[4], (IplImage*)Arg[1] ); //f1=f-f4
	cvPow( (IplImage*)Arg[1], (IplImage*)Arg[1], 2.0 );
	return NULL;
}
Ejemplo n.º 30
0
// Pre-processes the current frame into wrt according to the file-level
// `whichKernel` selector:
//   0 - temporal difference (cur - prv)
//   1 - Roberts edges      2 - Prewitt edges      3 - Laplacian edges
//   4 - histogram normalization
//   5 - histogram normalization followed by Prewitt edges
//   6 - DFT of the frame   7 - Frei-Chen edges    8 - generic filter
//   9 - grayscale conversion followed by an averaging filter
// prv/nxt are the neighbouring frames; nxt is currently unused.
void preProcess(IplImage *prv,IplImage *cur,IplImage *nxt,IplImage *wrt)
{    
	switch ( whichKernel)
	{
	case 0:
		// difference of two consecutive frames
		cvSub(cur,prv,wrt);
		break;
	case 1:
		Detect(wrt, cur,Robert);
		break;
	case 2:
		Detect(wrt, cur,Prewitt);
		break;
	case 3:
		Detect(wrt, cur,LapLas);
		break;
	case 4:
		histogramNormalize(wrt, cur);
		break;
	case 5:
		histogramNormalize(pImagePool[0], cur);
		Detect(wrt,pImagePool[0],Prewitt);
		break;
	case 6:
		{
			imgDFT(wrt, cur);
		}
		break;
	case 7:
		{
			Detect(wrt, cur,FreiChen);
			break;
		}
	case 8:
		{
			Filter(wrt, cur);
			break;
		}
	case 9:
		{
			transRGB2Gray( pImagePool[0], cur);
			Filter(wrt, pImagePool[0],averageFilter);
			break;	// was missing: previously fell through to default
		}
	default:
		break;
	}

}