Example #1
void blur_function(const IplImage *latent_image, IplImage *blur_image, const CvMat *hom1, const CvMat *hom2)
{
	const int T = 20;
	const int tau = 10;
	CvMat *id_mat = cvCreateMat(3, 3, CV_32FC1);
	cvSetIdentity(id_mat, cvRealScalar(1));
	CvMat *invhom1 = cvCreateMat(3, 3, CV_32FC1);
	cvInvert(hom1, invhom1, CV_LU);
	
	CvMat *h1 = cvCreateMat(3, 3, CV_32FC1);
	CvMat *h2 = cvCreateMat(3, 3, CV_32FC1);
	CvSize size = cvSize(latent_image->width, latent_image->height);
	IplImage *temp = cvCreateImage(size, latent_image->depth, latent_image->nChannels);
	IplImage *blur = cvCreateImage(size, IPL_DEPTH_32F, latent_image->nChannels);
	cvSetZero(blur);
	
	for (int i = 1; i <= tau; ++i)
	{
		cvAddWeighted(id_mat, (double)(T-i)/T, invhom1, (double)i/T, 0, h1);
		cvAddWeighted(id_mat, (double)(T-i)/T, hom2, (double)i/T, 0, h2);
		cvWarpPerspective(latent_image, temp, h1, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
		cvAdd(blur, temp, blur, NULL);
		cvWarpPerspective(latent_image, temp, h2, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
		cvAdd(blur, temp, blur, NULL);
	}
	cvAdd(blur, latent_image, blur, NULL);
	cvConvertScale(blur, blur_image, 1.0/(2*tau+1), 0);
	
	cvReleaseMat(&id_mat);
	cvReleaseMat(&invhom1);
	cvReleaseMat(&h1);
	cvReleaseMat(&h2);
	cvReleaseImage(&temp);
	cvReleaseImage(&blur);
}
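A minimal driver for blur_function(), sketched for orientation only: the file name is a placeholder and the two homographies are plain identity matrices (i.e. no inter-frame motion), not values from the original project.

IplImage *latent = cvLoadImage("frame.png", 1);
IplImage *blurred = cvCreateImage(cvGetSize(latent), latent->depth, latent->nChannels);
CvMat *hom1 = cvCreateMat(3, 3, CV_32FC1);
CvMat *hom2 = cvCreateMat(3, 3, CV_32FC1);
cvSetIdentity(hom1, cvRealScalar(1)); /* identity stands in for a real homography */
cvSetIdentity(hom2, cvRealScalar(1));
blur_function(latent, blurred, hom1, hom2);
cvSaveImage("blurred.png", blurred);
cvReleaseMat(&hom1);
cvReleaseMat(&hom2);
cvReleaseImage(&latent);
cvReleaseImage(&blurred);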
Example #2
void sum_rgb(IplImage *src, IplImage *dst) {

    // Allocate individual image planes
    IplImage *r = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    IplImage *g = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    IplImage *b = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);

    // Split the image into its color planes (OpenCV stores color images in BGR
    // order, so 'r' actually receives the blue plane; with equal weights below
    // the naming mix-up is harmless)
    cvSplit(src, r, g, b, NULL);

    // Temporary storage
    IplImage *s = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);

    // Add equally weighted rgb values
    cvAddWeighted(r, 1./3., g, 1./3., 0.0, s);
    cvAddWeighted(s, 2./3., b, 1./3., 0.0, s);

    // Truncate values above 100 (the max_value argument is ignored for CV_THRESH_TRUNC)
    cvThreshold(s, dst, 100, 100, CV_THRESH_TRUNC);


    cvReleaseImage(&r);
    cvReleaseImage(&g);
    cvReleaseImage(&b);
    cvReleaseImage(&s);
}
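A hedged usage sketch for sum_rgb(); the input path is a placeholder. dst must be a single-channel 8-bit image of the same size as src:

IplImage *src = cvLoadImage("input.jpg", 1);
IplImage *dst = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
sum_rgb(src, dst);
cvNamedWindow("sum_rgb", 1);
cvShowImage("sum_rgb", dst);
cvWaitKey(0);
cvReleaseImage(&src);
cvReleaseImage(&dst);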
Example #3
void VarFlow::calculate_residual(int current_level, float h, IplImage** J13_array, IplImage** J23_array){

	IplImage* imgU = imgU_array[current_level];
	IplImage* imgV = imgV_array[current_level];
	IplImage* imgAfxfx = imgAfxfx_array[current_level];
	IplImage* imgAfxfy = imgAfxfy_array[current_level];
	IplImage* imgAfxft = J13_array[current_level];
	IplImage* imgAfyfy = imgAfyfy_array[current_level];
	IplImage* imgAfyft = J23_array[current_level];
	IplImage* imgU_res_err = imgU_res_err_array[current_level];
	IplImage* imgV_res_err = imgV_res_err_array[current_level];
                                
    int i;
    float *u_ptr, *v_ptr, *fxfx_ptr, *fxfy_ptr, *fyfy_ptr, *u_res_err_ptr, *v_res_err_ptr;
    
    int max_i = imgU->height * imgU->width;
    int x, y;
    
    u_res_err_ptr = (float*)(imgU_res_err->imageData);
    v_res_err_ptr = (float*)(imgV_res_err->imageData);
        
    u_ptr = (float*)(imgU->imageData);
    v_ptr = (float*)(imgV->imageData);
            
    fxfx_ptr = (float*)(imgAfxfx->imageData);
    fxfy_ptr = (float*)(imgAfxfy->imageData);
    fyfy_ptr = (float*)(imgAfyfy->imageData);
    
    x = 0;
    y = 0;
    
    for(i = 0; i < max_i; i++){
            
            // Get A^h * x_tilde^h (equation 10)
            u_res_err_ptr[i] = residual_part_step(imgU, x, y, h, fxfx_ptr[i], fxfy_ptr[i], v_ptr[i] );
            v_res_err_ptr[i] = residual_part_step(imgV, x, y, h, fyfy_ptr[i], fxfy_ptr[i], u_ptr[i] );
            
            x++;
            if(x == imgU->width){
                  x = 0;
                  y++;
            }
        
    }
    
    // Get full residual
    cvAddWeighted( imgAfxft, (1/alpha), imgU_res_err, -1, 0, imgU_res_err );
    cvAddWeighted( imgAfyft, (1/alpha), imgV_res_err, -1, 0, imgV_res_err );
}
Example #4
void Sharpen::run()
{
	const IplImage *const tInputImage = *mInputImage;
    Image::matchImageFormats(tInputImage, &mOutputImage);
    cvSmooth(tInputImage, mOutputImage);
    cvAddWeighted(tInputImage, 1.5, mOutputImage, -0.5, 0.0, mOutputImage);
}
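Sharpen::run() is a classic unsharp mask: the original is weighted at 1.5 and the blurred copy at -0.5, so the two weights still sum to 1. The same idea as a self-contained sketch with plain IplImages (all names are illustrative, not taken from the Sharpen class):

IplImage *src = cvLoadImage("photo.jpg", 1);
IplImage *out = cvCreateImage(cvGetSize(src), src->depth, src->nChannels);
cvSmooth(src, out, CV_GAUSSIAN, 3, 3, 0, 0);  /* out = blurred copy */
cvAddWeighted(src, 1.5, out, -0.5, 0.0, out); /* out = 1.5*src - 0.5*blur(src) */
cvSaveImage("sharpened.jpg", out);
cvReleaseImage(&src);
cvReleaseImage(&out);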
Example #5
int mixImages(char *filename, char *filename2, char *output) {
    image1 = cvLoadImage(filename, 1);
    assert( image1 != 0 );

    image2 = cvLoadImage(filename2, 1);
    assert( image2 != 0 );

    // size of the template
    int width = image2->width;
    int height = image2->height;

    // allocate the result image (must match the ROI size below)
    result = cvCreateImage(cvSize(width, height), image2->depth, image2->nChannels);

    // set the region of interest
    cvSetImageROI(image1, cvRect(0, 0 , width, height));
    // weighted sum
    cvAddWeighted(image1, 0.5, image2, 0.5, 0.0, result);
    // release the region of interest
    cvResetImageROI(image1);

    cvSaveImage(output, result);

    // free resources
    cvReleaseImage( &image1 );
    cvReleaseImage( &image2 );
    cvReleaseImage( &result );
    return 0;
}
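A hypothetical invocation, assuming image1, image2 and result are the globals the original file evidently declares; the three paths are placeholders:

int main(void) {
    return mixImages("scene.jpg", "template.jpg", "mixed.jpg");
}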
Example #6
void display(struct ctx *ctx)
{
    int i;
    static IplImage *oldimage = NULL;

    /*if (ctx->num_fingers == NUM_FINGERS)
    {

#if defined(SHOW_HAND_CONTOUR)
        cvDrawContours(ctx->image, ctx->contour,
                       CV_RGB(0,0,255), CV_RGB(0,255,0),
                       0, 1, CV_AA, cvPoint(0,0));
#endif


        cvCircle(ctx->image, ctx->hand_center, 5, CV_RGB(255, 255, 0),
                 1, CV_AA, 0);
        cvCircle(ctx->image, ctx->hand_center, ctx->hand_radius,
                 CV_RGB(255, 0, 0), 1, CV_AA, 0);

        for (i = 0; i < ctx->num_fingers; i++)
        {

            cvCircle(ctx->image, ctx->fingers[i], 10,
                     CV_RGB(0, 255, 0), 3, CV_AA, 0);

            cvLine(ctx->image, ctx->hand_center, ctx->fingers[i],
                   CV_RGB(255,255,0), 1, CV_AA, 0);
        }

        for (i = 0; i < ctx->num_defects; i++)
        {
            cvCircle(ctx->image, ctx->defects[i], 2,
                     CV_RGB(200, 200, 200), 2, CV_AA, 0);
        }
    }*/

    cvShowImage("output", ctx->image);
    IplImage *dst;
    if ( oldimage != NULL ) {
        dst = cvCloneImage(ctx->image);

        cvSub(ctx->image,oldimage,dst,NULL);

        cvShowImage("thresholded", dst);

        cvAddWeighted(oldimage, 0.25, ctx->image, 0.75, 0.0, oldimage);


        cvReleaseImage(&dst);
        //cvReleaseImage(&oldimage);


    }
    else {
        cvShowImage("thresholded", ctx->thr_image);
        oldimage=cvCloneImage(ctx->image);

    }
}
Example #7
//--------------------------------------------------------------------------------
void ofxCvFloatImage::addWeighted( ofxCvGrayscaleImage& mom, float f ) {
	if( matchingROI(getROI(), mom.getROI()) ) {
        convertGrayToFloat(mom.getCvImage(), cvImageTemp);
        cvAddWeighted( cvImageTemp, f, cvImage, 1.0f-f,0, cvImage );
        flagImageChanged();
    } else {
        ofLog(OF_LOG_ERROR, "in addWeighted, ROI mismatch");
    }
}
Example #8
void Update(DefHist* pH, float W)
{   /* Update histogram: */
    double  Vol, WM, WC;
    Vol = 0.5*(m_HistVolume + pH->m_HistVolume);
    WM = Vol*(1-W)/m_HistVolume;
    WC = Vol*(W)/pH->m_HistVolume;
    cvAddWeighted(m_pHist, WM, pH->m_pHist,WC,0,m_pHist);
    m_HistVolume = (float)cvSum(m_pHist).val[0];
}   /* Update histogram: */
Example #9
/**
 *
 * Creates the worm heads-up display (HUD) for monitoring or for saving to disk.
 * You must pass a pointer to an IplImage that has already been allocated and
 * has the dimensions of Worm->SizeOfImage.
 *
 *
 */
int CreateWormHUDS(IplImage* TempImage, WormAnalysisData* Worm, WormAnalysisParam* Params, Frame* IlluminationFrame){

	int CircleDiameterSize=10;

	/** Overlay a translucent image of the illumination pattern **/

	double weighting=0.20; //Alpha blend weighting
	if (Params->DLPOn) weighting=0.45; // if DLP is on make the illumination pattern more opaque
	cvAddWeighted(Worm->ImgOrig,1,IlluminationFrame->iplimg,weighting,0,TempImage);

	//Want to also display boundary!
	cvDrawContours(TempImage, Worm->Boundary, cvScalar(255,0,0),cvScalar(0,255,0),100);

//	DrawSequence(&TempImage,Worm->Segmented->LeftBound);
//	DrawSequence(&TempImage,Worm->Segmented->RightBound);

	cvCircle(TempImage,*(Worm->Tail),CircleDiameterSize,cvScalar(255,255,255),1,CV_AA,0);
	cvCircle(TempImage,*(Worm->Head),CircleDiameterSize/2,cvScalar(255,255,255),1,CV_AA,0);

	/** Prepare Text **/
	CvFont font;
	cvInitFont(&font,CV_FONT_HERSHEY_TRIPLEX ,1.0,1.0,0,2,CV_AA);

	/** Display DLP On Off **/
	if (Params->DLPOn) {
		cvPutText(TempImage,"DLP ON",cvPoint(20,70),&font,cvScalar(255,255,255));
	}
	/** Display Recording if we are recording **/
	if (Params->Record){
		cvPutText(TempImage,"Recording",cvPoint(20,100),&font,cvScalar(255,255,255));

	} else {
		if (Params->DLPOn) cvPutText(TempImage,"Did you forget to record?",cvPoint(20,100),&font,cvScalar(255,255,255));
	}


	/*** Let the user know if the illumination flood light is on ***/
	if (Params->IllumFloodEverything){
		cvPutText(TempImage,"Floodlight",cvPoint(20,130),&font,cvScalar(255,255,255));
	}

	char protoNum[20];
	/** If we are using protocols, display the protocol number **/
	if (Params->ProtocolUse){
		sprintf(protoNum,"Step %d",Params->ProtocolStep);
		cvPutText(TempImage,protoNum,cvPoint(20,160),&font,cvScalar(255,255,255));

	}


	char frame[30];
	sprintf(frame,"%d",Worm->frameNum);
	cvPutText(TempImage,frame,cvPoint(Worm->SizeOfImage.width- 200,Worm->SizeOfImage.height - 10),&font,cvScalar(255,255,255) );
	return 0;
}
Example #10
static void icvH11Ops( CvMat* X, CvMat* Y, void* userdata )
{
	CvH11OpsData* h11 = (CvH11OpsData*)userdata;
	h11->AOps( X, h11->AR, h11->userdata );
	h11->AtOps( h11->AR, h11->AtR, h11->userdata );
	double rc = h11->fe_inv_2 * cvDotProduct( h11->atr, X );
	cvAddWeighted( h11->AtR, -h11->fe_inv, h11->atr, rc, 0, h11->AtR );
	cvMul( h11->sigx, X, h11->tX );
	cvAdd( h11->tX, h11->AtR, Y );
}
Example #11
void THISCLASS::OnStep() {
	// Get and check input image
	IplImage *inputimage = mCore->mDataStructureImageColor.mImage;
	if (! inputimage) {
		AddError(wxT("No input image."));
		return;
	}
	if (inputimage->nChannels != 3) {
		AddError(wxT("The input image is not a color image."));
		return;
	}

	// Check and update the background
	if (! mOutputImage) {
	  mOutputImage = cvCloneImage(inputimage);
	} else {
	  cvCopyImage(inputimage, mOutputImage);
	}
	if (! mBackgroundImage) {
		mBackgroundImage = cvCloneImage(mOutputImage);
	} else if (mUpdateProportion > 0) {
		if ((cvGetSize(mOutputImage).height != cvGetSize(mBackgroundImage).height) || (cvGetSize(mOutputImage).width != cvGetSize(mBackgroundImage).width)) {
			AddError(wxT("Input and background images do not have the same size."));
			return;
		}

		cvAddWeighted(mOutputImage, mUpdateProportion, mBackgroundImage, 1.0 - mUpdateProportion, 0, mBackgroundImage);
	}

	try {
		// Correct the tmpImage with the difference in image mean
		if (mCorrectMean) {
			mBackgroundImageMean = cvAvg(mBackgroundImage);
			CvScalar tmpScalar = cvAvg(mOutputImage);
			cvAddS(mOutputImage, cvScalar(mBackgroundImageMean.val[0] - tmpScalar.val[0], mBackgroundImageMean.val[1] - tmpScalar.val[1], mBackgroundImageMean.val[2] - tmpScalar.val[2]), mOutputImage);
		}

		// Background subtraction
		if (mMode == sMode_SubImageBackground) {
			cvSub(mOutputImage, mBackgroundImage, mOutputImage);
		} else if (mMode == sMode_SubBackgroundImage) {
			cvSub(mBackgroundImage, mOutputImage, mOutputImage);
		} else {
			cvAbsDiff(mOutputImage, mBackgroundImage, mOutputImage);
		}
	} catch (...) {
		AddError(wxT("Background subtraction failed."));
	}
	mCore->mDataStructureImageColor.mImage = mOutputImage;
	// Set the display
	DisplayEditor de(&mDisplayOutput);
	if (de.IsActive()) {
		de.SetMainImage(mOutputImage);
	}
}
Example #12
void easyplot(IplImage *fr, IplImage *fr0)
{
	int rmean = 0.5*(r1+r2), rthick = r1-r2;
	CvPoint up, cp, bp;
	
	up.x = coo2pix(upc.x);
	up.y = coo2pix(upc.y);
	
	// pause button
	if(sqr(pbuttonp.x-up.x)+sqr(pbuttonp.y-up.y)<sqr(r1+buttonr)) {
		plot_circular_button(fr, yellow);
	}
	
	// user handle
	cvCircle(fr, up, rmean,   red, rthick+2, CV_AA, 0);
	cvCircle(fr, up, rmean, white, rthick-4, CV_AA, 0);
	
	// computer handle
	cp.x = coo2pix(cpc.x);
	cp.y = coo2pix(cpc.y);
	cvCircle(fr, cp, rmean, green, rthick+2, CV_AA, 0);
	cvCircle(fr, cp, rmean, white, rthick-4, CV_AA, 0);
	
	// ball
	bp.x = coo2pix(bpc.x);
	bp.y = coo2pix(bpc.y);
	if(bp.y>winy+r0) {
		cvCircle(fr, cvPoint(winx/2,winy-bound), criticr, CV_RGB(150,150,150), 10, CV_AA, 0);
		cvCircle(fr, cvPoint(winx/2,winy-bound), explosr, CV_RGB(150,150,150), criticr-explosr, CV_AA, 0);
		explosr+=7;
	}
	else if(bp.y<-r0) {
		cvCircle(fr, cvPoint(winx/2,bound), criticr, CV_RGB(150,150,150), 10, CV_AA, 0);
		cvCircle(fr, cvPoint(winx/2,bound), explosr, CV_RGB(150,150,150), criticr-explosr, CV_AA, 0);
		explosr+=7;
	}
	else {
		cvCircle(fr, bp, r0,  white, -1, CV_AA, 0);
		cvCircle(fr, bp, r0,   blue, 3, CV_AA, 0);
	}
	
	// blur processing
	cvSmooth(fr, fr, CV_BLUR, 15, 15, 0.0, 0.0);
	cvAddWeighted(fr0, 0.55, fr, 1.0, -10.0, fr);
	
	// score
	cvSetImageROI(fr, scoreroi1);
	cvAdd(fr, scoretext1, fr);
	cvSetImageROI(fr, scoreroi2);
	cvAdd(fr, scoretext2, fr);
	cvResetImageROI(fr);
	cvSmooth(fr, fr, CV_BLUR, 5, 5, 0.0, 0.0);
	
	cvCopy(fr, fr0);
}
Example #13
/// ****************************************************
///
///             UPDATE BLEND IMAGE
///
/// ****************************************************
void testApp::updateBlendImage() {
    // Temporary storage.
    IplImage* s = cvCreateImage( cvSize(outW,outH), IPL_DEPTH_8U, 3 );
    cvSet(s, cvScalar(0,0,0));

    ofxCvColorImage tempImg;
    tempImg.allocate(outW, outH);

    for(size_t i = 0; i < ipGrabber.size(); i++) {
        //ofSetColor(255, 255, 255,alphaValues[i]);
        //ipGrabber[i]->draw(xpos,ypos,w,h);
        //ipImg[i].setFromPixels(ipGrabber[i]->getPixels(), outW, outH);
        int ipWidth = ipGrabber[i]->getWidth();
        int ipHeight = ipGrabber[i]->getHeight();
        try {
            if ((ipWidth != outW) || (ipHeight != outH)) {
                ofxCvColorImage tempIpImg;
                tempIpImg.allocate(ipWidth, ipHeight);

                tempIpImg.setFromPixels(ipGrabber[i]->getPixels(), ipWidth, ipHeight);
                tempImg.scaleIntoMe(tempIpImg, OF_INTERPOLATE_NEAREST_NEIGHBOR);
                cvAddWeighted( s, 1.0,
                               tempImg.getCvImage(), (float)alphaValues[i]/255.0,
                               0.0, s );
            }
            else {
                tempImg.setFromPixels(ipGrabber[i]->getPixels(), outW, outH);
                cvAddWeighted( s, 1.0,
                               tempImg.getCvImage(), (float)alphaValues[i]/255.0,
                               0.0, s );
            }
        }
        catch (Exception& e) {
            ofLogError("testApp") << "Exception : " << e.displayText();
        }
    }
    cvAddWeighted( s, 1.0,
                   outputImg.getCvImage(), (float)alphaValues[NUM_CAMERAS-1]/255.0,
                   0.0, s );
    blendImg.setFromPixels((unsigned char *)s->imageData, outW, outH);
    cvReleaseImage( &s );
}
Example #14
//--------------------------------------------------------------------------------
void ofxCvFloatImage::addWeighted( ofxCvGrayscaleImage& mom, float f ) {
	if( pushSetBothToTheirIntersectionROI(*this,mom) ) {
        convertGrayToFloat(mom.getCvImage(), cvImageTemp);
        cvAddWeighted( cvImageTemp, f, cvImage, 1.0f-f,0, cvImage );
        popROI();       //restore previous ROI
        mom.popROI();   //restore previous ROI
        flagImageChanged();
    } else {
        ofLog(OF_LOG_ERROR, "in addWeighted, ROI mismatch");
    }
}
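A short usage sketch for the wrapper above; the image names and the blend factor are illustrative, and both images are assumed to be allocated with matching ROIs:

ofxCvGrayscaleImage grayFrame;  // assumed filled from a camera elsewhere
ofxCvFloatImage accumulator;    // assumed allocated to the same size
// accumulator = 0.1*grayFrame + 0.9*accumulator, a slow-moving average
accumulator.addWeighted(grayFrame, 0.1f);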
Example #15
/*
 * Creates an illumination pattern
 * according to an illumination montage and the location of a segmented worm.
 *
 * To use with protocol, use GetMontageFromProtocolInterp() first
 *
 * FlipLR is a bool. When set to 1, the illumination pattern is reflected across the worm's centerline.
 */
void IllumWorm(SegmentedWorm* segworm, CvSeq* IllumMontage, IplImage* img,CvSize gridSize, int FlipLR){
	int DEBUG=0;
	if (DEBUG) printf("In IllumWorm()\n");
	CvPoint* polyArr=NULL;
	int k;
	int numpts=0;
	for (k = 0; k < IllumMontage->total; ++k) {

		numpts=CreatePointArrFromMontage(&polyArr,IllumMontage,k);
		int j;
		//DisplayPtArr(polyArr,numpts);
		CvPoint* ptPtr=polyArr;
		for (j = 0; j < numpts; ++j) {
			/** make a local copy of the current pt in worm space **/
			CvPoint wormPt=*(ptPtr);
			/** replace that point with the new pt in image space **/
			*(ptPtr)=CvtPtWormSpaceToImageSpace(wormPt,segworm, gridSize,FlipLR);
			/** move to the next pointer **/
			ptPtr++;
		}



		if (DEBUG) {
				int i;
			printf("new polygon\n");
			for (i = 0; i < numpts; i++) {
				printf(" (%d, %d)\n",polyArr[i].x,polyArr[i].y);
				cvCircle(img, polyArr[i], 1, cvScalar(255, 255, 255), 1);
				cvShowImage("Debug",img);
				cvWaitKey(10);
			}

		}


		/** Actually draw the polygon **/
		cvFillPoly(img,&polyArr,&numpts,1,cvScalar(255,255,255),8);

		free(polyArr);
		polyArr=NULL;
	}

	if (DEBUG)	{
		IplImage* TempImage=cvCreateImage(cvGetSize(img),IPL_DEPTH_8U,1);
		DrawSequence(&TempImage,segworm->LeftBound);
		DrawSequence(&TempImage, segworm->RightBound);
		double weighting=0.4;
		cvAddWeighted(img,weighting,TempImage,1,0,TempImage);
		cvShowImage("Debug",TempImage);
		cvReleaseImage(&TempImage); /* release the debug overlay; cvShowImage keeps its own copy */
	}

}
Example #16
//--------------------------------------------------------------------------------
void ofxCvFloatImage::addWeighted( ofxCvGrayscaleImage& mom, float f ) {
	if( mom.width == width && mom.height == height ) {
         IplImage* cvTemp = cvCreateImage( cvSize(width,height), IPL_DEPTH_32F, 1 );
         
		 cvConvertScale( mom.getCvImage(), cvTemp, 1, 0 );
		 //cvConvert( mom.getCvImage(), cvImage ); 
         cvAddWeighted( cvTemp, f, cvImage, 1.0f-f,0, cvImageTemp );
         swapTemp();
         cvReleaseImage( &cvTemp );
    } else {
        cout << "error in addWeighted, images are different sizes" << endl;
    }
}
Example #17
//--------------------------------------------------------------------------------
void ofxCvShortImage::addWeighted( ofxCvGrayscaleImage& mom, float f ) {
	if( !bAllocated ){
		ofLog(OF_LOG_ERROR, "in addWeighted, image is not allocated");		
		return;	
	}
	
	if( matchingROI(getROI(), mom.getROI()) ) {
        convertGrayToShort(mom.getCvImage(), cvImageTemp);
        cvAddWeighted( cvImageTemp, f, cvImage, 1.0f-f,0, cvImage );
        flagImageChanged();
    } else {
        ofLog(OF_LOG_ERROR, "in addWeighted, ROI mismatch");
    }
}
Example #18
void tlen_image(const char *in_file, const char *templ_file, const char *result_file, double level)
{
    int x = 0;
    int y = 0;
    IplImage* dst;
    IplImage* image, *gray_image;
    IplImage* templ;
    IplImage* new_templ;
    int width, height;
    double alpha, beta;

    /* Get image */
    image = cvLoadImage(in_file, 1);
    assert(image != 0);

    templ = cvLoadImage(templ_file, 1);
    assert(templ != 0);

    /* Size of template */
    new_templ = img_resize(templ, image->width, image->height);
    width = new_templ->width;
    height = new_templ->height;
    dst = cvCloneImage(new_templ);

    alpha = level;
    beta = 0.5;

    /* Set area */
    cvSetImageROI(image, cvRect(x, y, width, height));
    /* Weighted sum */
    cvAddWeighted(image, alpha, new_templ, beta, 0.0, dst);
    /* Free area */
    cvResetImageROI(image);

    /* Set black-white image */
    gray_image = cvCreateImage(cvSize(image->width,image->height), 8, 1);
    cvCvtColor(dst, gray_image, CV_RGB2GRAY);

    cvSaveImage(result_file, gray_image, NULL);

    /* Free memory */
    cvReleaseImage(&image);
    cvReleaseImage(&gray_image);
    cvReleaseImage(&templ);
    cvReleaseImage(&new_templ);
    cvReleaseImage(&dst);
}
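A hypothetical call of tlen_image(); the paths are placeholders. level is the weight of the input image in the blend, while the resized template always contributes a fixed 0.5:

tlen_image("photo.jpg", "template.png", "result.png", 0.7);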
Example #19
//--------------------------------------------------------------
void testApp::update() {
	cam.setMaxLen(panel.getValueF("maxLen"));
	cam.setStepSize(panel.getValueF("stepSize"));
	cam.setClipping(panel.getValueF("nearClipping"), panel.getValueF("farClipping"));
	cam.setOrthoScale(panel.getValueF("orthoScale"));
	cam.setPosition(ofxVec3f(panel.getValueF("camx"), panel.getValueF("camy"), panel.getValueF("camz")));
	cam.setRotation(ofxVec3f(panel.getValueF("camrx"), panel.getValueF("camry"), panel.getValueF("camrz")));
 
	int blurAmount = panel.getValueI("blurAmount");
	int threshLevel = panel.getValueI("threshLevel");
	int minArea = panel.getValueI("minArea");
	int maxArea = panel.getValueI("maxArea");
	int nConsidered = panel.getValueI("nConsidered");
 
	cam.update();
	if(cam.isFrameNew()) {
		float alpha = panel.getValueF("alpha");
		float beta = 1 - alpha;
		IplImage* camIpl = toCv(cam.getPixels(), cam.getWidth(), cam.getHeight(), OF_IMAGE_GRAYSCALE);
		cvAddWeighted(camIpl, alpha, blur.getCvImage(), beta, 0, blur.getCvImage());
		blur.flagImageChanged();
		blur.blur(blurAmount * 2 + 1);
 
		thresh = blur;
		thresh.threshold(threshLevel);
		finder.findContours(thresh, minArea, maxArea, nConsidered, false, false);
 
		// make the average the centroid
		// should be more stable than the moments
		vector<ofxCvBlob>& blobs = finder.blobs;
		for(int i = 0; i < blobs.size(); i++) {
			ofxCvBlob& cur = blobs[i];
			vector<ofPoint>& pts = cur.pts;
			
			pts = ofGetSmoothed(pts, 8);
			
			ofPoint& centroid = cur.centroid;
			centroid.set(0, 0);
			for(int j = 0; j < pts.size(); j++) {
				centroid += pts[j];
			}
			centroid /= pts.size();
		}
 
		updateOsc();
	}
}
Example #20
void pausestate(struct timeval *past, IplImage *fr)
{
	// check whether key something or not
	char key;
	key = cvWaitKey(3);
	if(key==27) escflag = 1;
	else if(key=='r' || key=='R') restartflag = 1;
	else if(key==' ') pauseflag = 1;
	// pause loop
	if(pauseflag) {
		cvAddWeighted(fr, 0.5, pausetext, 1.0, 0.0, fr);
		while(pauseflag) {
			cvCopy(fr, pausefr);
			// check whether key something or not
			key = cvWaitKey(10);
			if(key==27) {
				escflag   = 1;
				pauseflag = 0;
			}
			else if(key=='r' || key=='R') {
				restartflag = 1;
				pauseflag = 0;
			}
			else if(key==' ') {
				pauseflag = !pauseflag;
			}
			// check mouse position among buttons
			if(mp.x>rbutton1.x && mp.x<rbutton3.x) {
				if(mp.y>rbutton1.y && mp.y<rbutton3.y) {
					plot_rectangular_button(pausefr, yellow, 1);
				}
				else if(mp.y>ebutton1.y && mp.y<ebutton3.y) {
					plot_rectangular_button(pausefr, yellow, 2);
				}
				else if(mp.y>sbutton1.y && mp.y<sbutton3.y) {
					plot_rectangular_button(pausefr, yellow, 3);
				}
			}
			cvSmooth(pausefr, pausefr, CV_BLUR, 5, 5, 0.0, 0.0);
			cvShowImage(windowname, pausefr);
		}
		time_interval(past);
	}
}
Example #21
// Constructor for range sample (includes motion track)
TrainingSample::TrainingSample(IplImage *frame, MotionTrack mt, HWND lc, HIMAGELIST il, int groupId) {
	// this constructor should only be called for range sample type
	assert(groupId == GROUPID_RANGESAMPLES);

	hwndListControl = lc;
    hImageList = il;
    iGroupId = groupId;
	iOrigId = groupId;
    motionTrack = mt;
    motionHistory = NULL;

    fullImageCopy = cvCreateImage(cvSize(frame->width,frame->height),IPL_DEPTH_8U, 3);
    cvZero(fullImageCopy);
    cvAddWeighted(frame, 0.5, fullImageCopy, 0.5, 0.0, fullImageCopy);
    
    // draw the trajectory in the sample image
	Template t("", mt);
    DrawTrack(fullImageCopy, t.m_points, CV_RGB(100,255,100), 3, GESTURE_SQUARE_SIZE);

    resizedImage = cvCreateImage(cvSize(LISTVIEW_SAMPLE_X,LISTVIEW_SAMPLE_Y),IPL_DEPTH_8U, 3); 
    bmpImage = new Bitmap(LISTVIEW_SAMPLE_X, LISTVIEW_SAMPLE_Y, PixelFormat24bppRGB);

    cvResize(fullImageCopy, resizedImage, CV_INTER_AREA);

    IplToBitmap(resizedImage, bmpImage);
    bmpImage->GetHBITMAP(NULL, &hbmImage);

    // Add image to imagelist
    int imgIndex = ImageList_Add(hImageList, hbmImage, NULL);

    // Add item to list view
    lvi.mask = LVIF_IMAGE | LVIF_STATE | LVIF_GROUPID;
    lvi.state = 0;
    lvi.stateMask = 0;
    lvi.iGroupId = groupId;
    lvi.iItem = imgIndex;
    lvi.iImage = imgIndex;
    lvi.iSubItem = 0;
    int newListItemPos = ListView_InsertItem(hwndListControl, &lvi);

    id = ListView_MapIndexToID(hwndListControl, newListItemPos);
}
Example #22
int main(int argc, const char * argv[]) {
    IplImage *src1, *src2;
    if( argc == 9 && ((src1 = cvLoadImage(argv[1],1)) != 0) && ((src2 = cvLoadImage(argv[2],1)) != 0)) {
        int x = atoi(argv[3]);
        int y = atoi(argv[4]);
        int width = atoi(argv[5]);
        int height = atoi(argv[6]);
        double alpha = atof(argv[7]);
        double beta = atof(argv[8]);
        
        cvSetImageROI(src1, cvRect(x, y, width, height));
        cvSetImageROI(src2, cvRect(0, 0, width, height));
        cvAddWeighted(src1, alpha, src2, beta, 0.0, src1);
        cvResetImageROI(src1);
        cvNamedWindow( "Alpha_blend", 1 );
        cvShowImage( "Alpha_blend", src1 );
        cvWaitKey();
    }
    return 0;
}
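The program expects eight arguments: two image paths, the ROI origin (x, y) and size (width, height) inside the first image, and the two blend weights. A hypothetical invocation (binary name and values are illustrative) would be: ./alpha_blend left.jpg right.jpg 100 100 200 200 0.7 0.3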
Example #23
void Background::backgroundSubtractionAndUpdate() {
    static const int THRESHOLD=30;
    static const int EROSION=20;
    static const int DILATION=20;

    // Subtraction
    Frame c;
    c = currentFrame;

    // Output image
    currentFrame->diff(&actualBackground);

    // Update
    currentFrame->threshold(THRESHOLD);
    currentFrame->dilation(DILATION);
    currentFrame->fill();
    currentFrame->erosion(EROSION);

    cvCopy(actualBackground.image,c.image,currentFrame->image);

    cvAddWeighted(actualBackground.image,0.9, c.image,0.1, 0.0, actualBackground.image);
}
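The cvAddWeighted() call above is a hand-rolled exponential running average of the background. For reference, the C API offers the same update as cvRunningAvg(), which however requires a 32-bit float accumulator; a minimal sketch under that assumption, with frame as a placeholder input image:

IplImage *background = cvCreateImage(cvGetSize(frame), IPL_DEPTH_32F, frame->nChannels);
cvConvert(frame, background);               /* initialise from the first frame */
/* then, once per frame: */
cvRunningAvg(frame, background, 0.1, NULL); /* background = 0.9*background + 0.1*frame */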
Example #24
void BlobLight::draw(){
	
	ofPushStyle();
	ofxCvGrayscaleImage Largeimg = blob(0)->grayDiff;
	img.scaleIntoMe(Largeimg);
	getPlugin<CameraCalibration*>(controller)->applyWarp(0);

	cvAddWeighted(history.getCvImage(),alpha, img.getCvImage(),beta,0.0, history.getCvImage());
	cvSubS(history.getCvImage(), cvScalar(addblack*100) , history.getCvImage());
	history.flagImageChanged();	

	if(blur > 0)
		img.blur(blur);
	if(threshold > 0){
		img.threshold(threshold, false);
	}
	if(blur2 > 0){
		img.blurGaussian(blur2);
	}
	

	historyTmp = history;
	historyTmp.blurGaussian(blur2);
	
	ofEnableAlphaBlending();
	glBlendFunc (GL_SRC_COLOR, GL_ONE);	

	ofSetColor(historyalpha*r, historyalpha*g, historyalpha*b, historyalpha*255);
	historyTmp.draw(0,0,1,1);
	
	ofSetColor( blobalpha*r2,  blobalpha*g2,  blobalpha*b2, blobalpha*255);
	img.draw(0, 0, 1,1);
	glPopMatrix();
	
//	img.draw(0, 0);

	ofPopStyle();
}
Example #25
// Panorama stitching
void SiftMatch::on_mosaicButton_clicked()
{
    // Stitching is only possible if the transform matrix H was computed
    // successfully, i.e. the two images share a common region
    if(H)
    {
        // Stitch the images: img1 is the left image, img2 is the right image
        CalcFourCorner();//compute the transformed coordinates of the four corners of image 2
        // Allocate the result image xformed: its height is the smaller of the two image
        // heights, and its width is set by the transformed positions of the top-right
        // and bottom-right corners of image 2
        xformed = cvCreateImage(cvSize(MIN(rightTop.x,rightBottom.x),MIN(img1->height,img2->height)),IPL_DEPTH_8U,3);
        // Perspective-transform the right image img2 with H (the transform shifts
        // coordinates to the right) and store the result in xformed
        cvWarpPerspective(img2,xformed,H,CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS,cvScalarAll(0));

        //cvNamedWindow(IMG_MOSAIC_TEMP); //show the intermediate image, i.e. only the transformed image 2
        //cvShowImage(IMG_MOSAIC_TEMP,xformed);

        // Simple stitching: overlay the left image img1 directly onto the left part of xformed
        xformed_simple = cvCloneImage(xformed);//simple stitched image, cloned from xformed
        cvSetImageROI(xformed_simple,cvRect(0,0,img1->width,img1->height));
        cvAddWeighted(img1,1,xformed_simple,0,0,xformed_simple);
        cvResetImageROI(xformed_simple);

        //cvNamedWindow(IMG_MOSAIC_SIMPLE);//create window
        //cvShowImage(IMG_MOSAIC_SIMPLE,xformed_simple);//show the simple stitched image

        // Processed stitched image, cloned from xformed
        xformed_proc = cvCloneImage(xformed);

        // The part left of the overlap region is taken entirely from image 1
        cvSetImageROI(img1,cvRect(0,0,MIN(leftTop.x,leftBottom.x),xformed_proc->height));
        cvSetImageROI(xformed,cvRect(0,0,MIN(leftTop.x,leftBottom.x),xformed_proc->height));
        cvSetImageROI(xformed_proc,cvRect(0,0,MIN(leftTop.x,leftBottom.x),xformed_proc->height));
        cvAddWeighted(img1,1,xformed,0,0,xformed_proc);
        cvResetImageROI(img1);
        cvResetImageROI(xformed);
        cvResetImageROI(xformed_proc);

        //cvNamedWindow(IMG_MOSAIC_BEFORE_FUSION);
        //cvShowImage(IMG_MOSAIC_BEFORE_FUSION,xformed_proc);//show the stitched image before fusion

        // Blend the overlap region with a weighted average
        int start = MIN(leftTop.x,leftBottom.x) ;//start position, i.e. the left boundary of the overlap region
        double processWidth = img1->width - start;//width of the overlap region
        double alpha = 1;//weight of the pixel from img1
        for(int i=0; i<xformed_proc->height; i++)//iterate over rows
        {
            const uchar * pixel_img1 = ((uchar *)(img1->imageData + img1->widthStep * i));//pointer to row i of img1
            const uchar * pixel_xformed = ((uchar *)(xformed->imageData + xformed->widthStep * i));//pointer to row i of xformed
            uchar * pixel_xformed_proc = ((uchar *)(xformed_proc->imageData + xformed_proc->widthStep * i));//pointer to row i of xformed_proc
            for(int j=start; j<img1->width; j++)//iterate over the columns of the overlap region
            {
                // On black pixels of xformed (no data there), copy the data from image 1 unchanged
                if(pixel_xformed[j*3] < 50 && pixel_xformed[j*3+1] < 50 && pixel_xformed[j*3+2] < 50 )
                {
                    alpha = 1;
                }
                else
                {   //weight of the img1 pixel, proportional to the distance between the
                    //current point and the left boundary of the overlap region
                    alpha = (processWidth-(j-start)) / processWidth ;
                }
                pixel_xformed_proc[j*3] = pixel_img1[j*3] * alpha + pixel_xformed[j*3] * (1-alpha);//B channel
                pixel_xformed_proc[j*3+1] = pixel_img1[j*3+1] * alpha + pixel_xformed[j*3+1] * (1-alpha);//G channel
                pixel_xformed_proc[j*3+2] = pixel_img1[j*3+2] * alpha + pixel_xformed[j*3+2] * (1-alpha);//R channel
            }
        }
        cvNamedWindow(IMG_MOSAIC_PROC);//create window
        cvShowImage(IMG_MOSAIC_PROC,xformed_proc);//show the processed stitched image

        //*Taking the plain average of the two images over the overlap region works poorly
        //Set the ROIs to the rectangle containing the overlap region
        cvSetImageROI(xformed_proc,cvRect(MIN(leftTop.x,leftBottom.x),0,img1->width-MIN(leftTop.x,leftBottom.x),xformed_proc->height));
        cvSetImageROI(img1,cvRect(MIN(leftTop.x,leftBottom.x),0,img1->width-MIN(leftTop.x,leftBottom.x),xformed_proc->height));
        cvSetImageROI(xformed,cvRect(MIN(leftTop.x,leftBottom.x),0,img1->width-MIN(leftTop.x,leftBottom.x),xformed_proc->height));
        cvAddWeighted(img1,0.5,xformed,0.5,0,xformed_proc);
        cvResetImageROI(xformed_proc);
        cvResetImageROI(img1);
        cvResetImageROI(xformed); //*/

        /*Filtering the area around the seam to remove it works poorly
        //Set rectangular ROIs straddling the seam on the images before and after processing
        cvSetImageROI(xformed_proc,cvRect(img1->width-10,0,img1->width+10,xformed->height));
        cvSetImageROI(xformed,cvRect(img1->width-10,0,img1->width+10,xformed->height));
        cvSmooth(xformed,xformed_proc,CV_MEDIAN,5);//median-filter the area around the seam
        cvResetImageROI(xformed);
        cvResetImageROI(xformed_proc);
        cvShowImage(IMG_MOSAIC_PROC,xformed_proc);//show the processed stitched image */

        /*Sharpening was meant to fix the distortion of the transformed image,
        but it works poorly for heavily warped images
        double a[]={  0, -1,  0, -1,  5, -1, 0, -1,  0  };//Laplacian kernel data
        CvMat kernel = cvMat(3,3,CV_64FC1,a);//Laplacian filter kernel
        cvFilter2D(xformed_proc,xformed_proc,&kernel);//filter
        cvShowImage(IMG_MOSAIC_PROC,xformed_proc);//show the processed stitched image*/

        // Save the stitched images
        QString name_xformed = name1;//file name: the original name with its index removed and "_Mosaic" appended
        cvSaveImage(name_xformed.replace( name_xformed.lastIndexOf(".",-1)-1 , 1 , "_Mosaic").toAscii().data() , xformed_simple);//save the simple stitched image
        cvSaveImage(name_xformed.insert( name_xformed.lastIndexOf(".",-1) , "_Proc").toAscii().data() , xformed_proc);//save the processed stitched image
        ui->mosaicButton->setEnabled(false);//disable the panorama stitching button
    }
}
Example #26
	void utilCV_AddWeighted(UtilCVImageStruct *p_im_src1, double alpha, UtilCVImageStruct *p_im_src2,
		double beta, double gamma, UtilCVImageStruct *p_im_dst) {
		cvAddWeighted((IplImage *) p_im_src1->p_iplimg, alpha, (IplImage *) p_im_src2->p_iplimg, beta, gamma,
			(IplImage *) p_im_dst->p_iplimg);
	}
Example #27
void CV_PyrSegmentationTest::run( int /*start_from*/ )
{
    const int level = 5;
    const double range = 20;

    int code = CvTS::OK;

    CvPoint _cp[] ={{33,33}, {43,33}, {43,43}, {33,43}};
    CvPoint _cp2[] ={{50,50}, {70,50}, {70,70}, {50,70}};
    CvPoint* cp = _cp;
    CvPoint* cp2 = _cp2;
    CvConnectedComp *dst_comp[3];
    CvRect rect[3] = {{50,50,21,21}, {0,0,128,128}, {33,33,11,11}};
    double a[3] = {441.0, 15822.0, 121.0};

/*    ippiPoint cp3[] ={130,130, 150,130, 150,150, 130,150};  */
/*	CvPoint cp[] ={0,0, 5,5, 5,0, 10,5, 10,0, 15,5, 15,0};  */
    int nPoints = 4;
    int block_size = 1000;

    CvMemStorage *storage;   /*   storage for connected component writing  */
    CvSeq *comp;

    CvRNG* rng = ts->get_rng();
    int i, j, iter;

    IplImage *image, *image_f, *image_s;
    CvSize size = {128, 128};
    const int threshold1 = 50, threshold2 = 50;

    rect[1].width = size.width;
    rect[1].height = size.height;
    a[1] = size.width*size.height - a[0] - a[2];

    OPENCV_CALL( storage = cvCreateMemStorage( block_size ) );

    for( iter = 0; iter < 2; iter++ )
    {
        int channels = iter == 0 ? 1 : 3;
        int mask[] = {0,0,0};

        image = cvCreateImage(size, 8, channels );
        image_s = cvCloneImage( image );
        image_f = cvCloneImage( image );

        if( channels == 1 )
        {
            int color1 = 30, color2 = 110, color3 = 180;

            cvSet( image, cvScalarAll(color1));
            cvFillPoly( image, &cp, &nPoints, 1, cvScalar(color2));
            cvFillPoly( image, &cp2, &nPoints, 1, cvScalar(color3));
        }
        else
        {
            CvScalar color1 = CV_RGB(30,30,30), color2 = CV_RGB(255,0,0), color3 = CV_RGB(0,255,0);

            assert( channels == 3 );
            cvSet( image, color1 );
            cvFillPoly( image, &cp, &nPoints, 1, color2);
            cvFillPoly( image, &cp2, &nPoints, 1, color3);
        }

        cvRandArr( rng, image_f, CV_RAND_UNI, cvScalarAll(0), cvScalarAll(range*2) );
        cvAddWeighted( image, 1, image_f, 1, -range, image_f );

        cvPyrSegmentation( image_f, image_s,
                           storage, &comp,
                           level, threshold1, threshold2 );

        if(comp->total != 3)
        {
            ts->printf( CvTS::LOG,
                "The segmentation function returned %d (not 3) components\n", comp->total );
            code = CvTS::FAIL_INVALID_OUTPUT;
            goto _exit_;
        }
        /*  read the connected components     */
        dst_comp[0] = (CvConnectedComp*)CV_GET_SEQ_ELEM( CvConnectedComp, comp, 0 );
        dst_comp[1] = (CvConnectedComp*)CV_GET_SEQ_ELEM( CvConnectedComp, comp, 1 );
        dst_comp[2] = (CvConnectedComp*)CV_GET_SEQ_ELEM( CvConnectedComp, comp, 2 );

        /*{
            for( i = 0; i < 3; i++ )
            {
                CvRect r = dst_comp[i]->rect;
                cvRectangle( image_s, cvPoint(r.x,r.y), cvPoint(r.x+r.width,r.y+r.height),
                    CV_RGB(255,255,255), 3, 8, 0 );
            }

            cvNamedWindow( "test", 1 );
            cvShowImage( "test", image_s );
            cvWaitKey(0);
        }*/

        code = cvTsCmpEps2( ts, image, image_s, 10, false, "the output image" );
        if( code < 0 )
            goto _exit_;

        for( i = 0; i < 3; i++)
        {
            for( j = 0; j < 3; j++ )
            {
                if( !mask[j] && dst_comp[i]->area == a[j] &&
                    dst_comp[i]->rect.x == rect[j].x &&
                    dst_comp[i]->rect.y == rect[j].y &&
                    dst_comp[i]->rect.width == rect[j].width &&
                    dst_comp[i]->rect.height == rect[j].height )
                {
                    mask[j] = 1;
                    break;
                }
            }
            if( j == 3 )
            {
                ts->printf( CvTS::LOG, "The component #%d is incorrect\n", i );
                code = CvTS::FAIL_BAD_ACCURACY;
                goto _exit_;
            }
        }

        cvReleaseImage(&image_f);
        cvReleaseImage(&image);
        cvReleaseImage(&image_s);
    }

_exit_:

    cvReleaseMemStorage( &storage );
    cvReleaseImage(&image_f);
    cvReleaseImage(&image);
    cvReleaseImage(&image_s);

    if( code < 0 )
        ts->set_failed_test_info( code );
}
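The cvRandArr/cvAddWeighted pair in the test above is a common idiom for adding zero-mean uniform noise: fill a buffer with values in [0, 2*range) and pass gamma = -range, so the offset is removed in the same pass (cvAddWeighted saturates only once, at the end). The trick in isolation, with img and range as placeholders:

CvRNG rng = cvRNG(-1);
IplImage *noise = cvCloneImage(img);
cvRandArr(&rng, noise, CV_RAND_UNI, cvScalarAll(0), cvScalarAll(2*range));
cvAddWeighted(img, 1, noise, 1, -range, img); /* img += noise - range */
cvReleaseImage(&noise);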
Example #28
void CvEM::init_em( const CvVectors& train_data )
{
    CvMat *w = 0, *u = 0, *tcov = 0;

    CV_FUNCNAME( "CvEM::init_em" );

    __BEGIN__;

    double maxval = 0;
    int i, force_symm_plus = 0;
    int nclusters = params.nclusters, nsamples = train_data.count, dims = train_data.dims;

    if( params.start_step == START_AUTO_STEP || nclusters == 1 || nclusters == nsamples )
        init_auto( train_data );
    else if( params.start_step == START_M_STEP )
    {
        for( i = 0; i < nsamples; i++ )
        {
            CvMat prob;
            cvGetRow( params.probs, &prob, i );
            cvMaxS( &prob, 0., &prob );
            cvMinMaxLoc( &prob, 0, &maxval );
            if( maxval < FLT_EPSILON )
                cvSet( &prob, cvScalar(1./nclusters) );
            else
                cvNormalize( &prob, &prob, 1., 0, CV_L1 );
        }
        EXIT; // do not preprocess covariation matrices,
              // as in this case they are initialized at the first iteration of EM
    }
    else
    {
        CV_ASSERT( params.start_step == START_E_STEP && params.means );
        if( params.weights && params.covs )
        {
            cvConvert( params.means, means );
            cvReshape( weights, weights, 1, params.weights->rows );
            cvConvert( params.weights, weights );
            cvReshape( weights, weights, 1, 1 );
            cvMaxS( weights, 0., weights );
            cvMinMaxLoc( weights, 0, &maxval );
            if( maxval < FLT_EPSILON )
                cvSet( weights, cvScalar(1./nclusters) );
            cvNormalize( weights, weights, 1., 0, CV_L1 );
            for( i = 0; i < nclusters; i++ )
                CV_CALL( cvConvert( params.covs[i], covs[i] ));
            force_symm_plus = 1;
        }
        else
            init_auto( train_data );
    }

    CV_CALL( tcov = cvCreateMat( dims, dims, CV_64FC1 ));
    CV_CALL( w = cvCreateMat( dims, dims, CV_64FC1 ));
    if( params.cov_mat_type == COV_MAT_GENERIC )
        CV_CALL( u = cvCreateMat( dims, dims, CV_64FC1 ));

    for( i = 0; i < nclusters; i++ )
    {
        if( force_symm_plus )
        {
            cvTranspose( covs[i], tcov );
            cvAddWeighted( covs[i], 0.5, tcov, 0.5, 0, tcov );
        }
        else
            cvCopy( covs[i], tcov );
        cvSVD( tcov, w, u, 0, CV_SVD_MODIFY_A + CV_SVD_U_T + CV_SVD_V_T );
        if( params.cov_mat_type == COV_MAT_SPHERICAL )
            cvSetIdentity( covs[i], cvScalar(cvTrace(w).val[0]/dims) );
        else if( params.cov_mat_type == COV_MAT_DIAGONAL )
            cvCopy( w, covs[i] );
        else
        {
            // generic case: covs[i] = (u')'*max(w,0)*u'
            cvGEMM( u, w, 1, 0, 0, tcov, CV_GEMM_A_T );
            cvGEMM( tcov, u, 1, 0, 0, covs[i], 0 );
        }
    }

    __END__;

    cvReleaseMat( &w );
    cvReleaseMat( &u );
    cvReleaseMat( &tcov );
}
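The cvTranspose/cvAddWeighted pair in init_em() enforces symmetry by averaging a covariance matrix with its own transpose. The trick in isolation, assuming a square CV_64FC1 matrix A:

CvMat *At = cvCreateMat(A->rows, A->cols, CV_64FC1);
cvTranspose(A, At);                   /* At = A^T           */
cvAddWeighted(A, 0.5, At, 0.5, 0, A); /* A  = (A + A^T) / 2 */
cvReleaseMat(&At);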
Example #29
//--------------------------------------------------------------
void testApp::update() {
    ofBackground(0, 0, 0);

    kinect.update();

    // there is a new frame and we are connected
    if(kinect.isFrameNew()) {

        // load grayscale depth image from the kinect source
        grayImage.setFromPixels(kinect.getDepthPixels(), kinect.width, kinect.height);

        // add effects to smooth signal and reduce noise
        grayImage.blur(7);
        grayImage.dilate();


        //for (int i=0; i < nearThreshold- farThreshold; i+=1) {
        grayThreshNear = grayImage;
        grayThreshFar = grayImage;
        grayThreshNear.threshold(farThreshold + 1 + 1, true);
        grayThreshFar.threshold(farThreshold + 1);
        cvAnd(grayThreshNear.getCvImage(), grayThreshFar.getCvImage(), grayImage.getCvImage(), NULL);

        cvAddWeighted(grayImage.getCvImage(), .9, grayImage_avg.getCvImage(), .1, 0.0, grayImage_avg.getCvImage());
        cvSmooth(grayImage_avg.getCvImage(), grayImage_avg.getCvImage());

        contourFinder.findContours(grayImage_avg, 200, (340*240)/3, 10, true);
        //}


        //cvAnd(grayImage.getCvImage(), grayOverall.getCvImage(), grayOverall.getCvImage(), NULL);



        /*
         * Edit data through pixel manipulation
         */

        /*
        unsigned char * pix = grayImage.getPixels();

        // draw image
        int diffThreshold = nearThreshold - farThreshold;

        // iterate through pixel data
        for(int w = 0; w < grayImage.getWidth(); w++) {
            for(int h=0; h < grayImage.getHeight(); h++) {

                // average previous pixels with current reading
                int index = h * int(grayImage.getWidth()) + w;
                double currentDepth = pix[index];
                double prevDepth = prevPix[index];
                prevPix[index] = prevDepth* .90 + currentDepth * .1;
                double  depth = prevPix[index];

                // boolean operations if inside sandbox boundaries and depth boundaries
                bool isInBoundary = w<xRightBoundary && w>xLeftBoundary && h<yBottomBoundary && h>yTopBoundary;
                bool isInDepthBoundary = currentDepth < nearThreshold && currentDepth > farThreshold;

                // set Timeout zone
                int lowerBoundary = 210;
                int upperBoundary = 400;
                bool isInActiveZone = currentDepth<upperBoundary && currentDepth>lowerBoundary;

                if (isInBoundary && isInActiveZone) {
                    isTimeout = false;
                    startTimeout = ofGetElapsedTimef();
                }
                if (ofGetElapsedTimef() == startTimeout + timeout) {
                    isTimeout = true;
                }

                // set pixels of colorImg based on depth ratio
                if( isInDepthBoundary && isInBoundary && !isTimeout) {
                    double diffToThreshold = depth - farThreshold;
                    double ratioInDepth = diffToThreshold/diffThreshold;

                    ofColor color = gradient.getColor(floor(ratioInDepth * 20), 0);
                    colorImg.setColor(w,h, color);
                }

            }
        }

        colorImg.update();
         */

        // update the cv images
        grayImage.flagImageChanged();
    }
}
Example #30
void Cam::update() {
    
    if (!isCapturing) return;
    
    if (useVideoPlayer) {
        videoPlayer.update();
        isFrameNew = videoPlayer.isFrameNew();
    }
    else if (useBlackmagic) {
        isFrameNew = blackmagic.update();
    }
    else {
        vidGrabber.update();
        isFrameNew = vidGrabber.isFrameNew();
    }
    
    // cache the fresh colour pixels for use in opticalflow and in Cam::getImage()
    if (isFrameNew) {
        if (useVideoPlayer) colourPixels.setFromPixels(videoPlayer.getPixels(), camWidth, camHeight, OF_IMAGE_COLOR);
        else if (useBlackmagic) colourPixels = blackmagic.getColorPixels();
        else colourPixels.setFromPixels(vidGrabber.getPixels(), camWidth, camHeight, OF_IMAGE_COLOR);
    }
	
	if (doFlow && isFrameNew){
        
        colorImage.setFromPixels(colourPixels.getPixels(), camWidth, camHeight);
		lastGrayImage = thisGrayImage;
        thisGrayImage = colorImage;
        ofxCvGrayscaleImage tempGrey = thisGrayImage;
        ofxCvGrayscaleImage tempGreyLast = lastGrayImage;
        tempGrey.resize(cvWidth, cvHeight);
        tempGreyLast.resize(cvWidth, cvHeight);
        
        // optical flow processing
        if (doFlow && flowSize > 0) {
            if(flowSize % 2  == 0) --flowSize;
            opticalFlowLk.calc(tempGreyLast, tempGrey, (int)flowSize);

            cvAddWeighted(
                          opticalFlowLk.vel_x.getCvImage(), (1 - opticalFlowSmoothing)*opticalFlowSensitivity,
                          flowX.getCvImage(), opticalFlowSmoothing, 0,
                          flowX.getCvImage()
                          );
            flowX.flagImageChanged();
            
            cvAddWeighted(
                          opticalFlowLk.vel_y.getCvImage(), (1 - opticalFlowSmoothing)*opticalFlowSensitivity,
                          flowY.getCvImage(), opticalFlowSmoothing, 0,
                          flowY.getCvImage()
                          );
            flowY.flagImageChanged();
            
            cvAddWeighted(
                          flowX.getCvImage(), 1,
                          flowY.getCvImage(), 1, 0,
                          flowCombined.getCvImage()
                          );
            //flowCombined.dilate();
            flowCombined.blur((int)blur);
            //flowCombined.erode();
            //flowCombined.erode();
            flowCombined.flagImageChanged();
            
            flow.begin();
            flowCombined.draw(0, 0, camWidth, camHeight);
            flow.end();
            
            ofxCvFloatImage tempFlow = flowCombined;
            tempFlow.resize(camWidth, camHeight);
            delayMap.setFromFloatImage(tempFlow);
        }
        
        // update textures for drawing
        //colorImage.updateTexture();
        //thisGrayImage.updateTexture();
        delayMap.updateTexture();
	}
    
}
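Each cvAddWeighted() call in Cam::update() follows the same first-order IIR pattern, smoothed = (1-s)*fresh + s*smoothed (here with an extra sensitivity gain on the fresh term). Reduced to its core, with fresh and smoothed as placeholder images of equal size and type:

double s = 0.9; /* smoothing factor: larger s = slower, steadier response */
cvAddWeighted(fresh, 1.0 - s, smoothed, s, 0, smoothed);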