Example #1
//--------------------------------------------------------------
void openniTracking::calculateMotion(){

	if(frameCounter > 5){ // don't do anything until we have enough frames in history

		grayNow = cleanImage;

		if(computeOpticalFlow){
			opticalFlow.calc(grayPrev, grayNow, 11); // optical flow analysis based on frame difference
			cvSmooth(opticalFlow.getVelX(), opticalFlow.getVelX(), CV_BLUR , CAM_VELOCITY_BLUR);
			cvSmooth(opticalFlow.getVelY(), opticalFlow.getVelY(), CV_BLUR , CAM_VELOCITY_BLUR);
		}

		motionImg.absDiff(grayPrev, grayNow);   // motionImg is the difference between current and previous frame
		cvThreshold(motionImg.getCvImage(), motionImg.getCvImage(), (int)mThreshold, 255, CV_THRESH_TOZERO); // anything below mThreshold, drop to zero (compensate for noise)
		numPixelsChanged = motionImg.countNonZeroInRegion(0, 0, _width, _height); // this is how many pixels have changed that are above mThreshold

		grayPrev = grayNow; // save current frame for next loop

		// Calculate the general "mood" of the scene
		mood = averageBnWVal(motionImg.getCvImage());

	}else{
		frameCounter++;
	}

}
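
Both listings compute the scene "mood" with averageBnWVal(), which is not shown in either example. Below is a minimal sketch of what that helper could look like, assuming it simply returns the mean brightness of the difference image via OpenCV's cvAvg; only the call sites above come from the source, the body is an assumption.

#include <opencv/cv.h>

// Hypothetical helper: returns the mean intensity of a single-channel image.
// The name and argument type come from the call sites above; the body is a guess.
static float averageBnWVal(IplImage* img){
	CvScalar mean = cvAvg(img);  // per-channel mean; a grayscale image has one channel
	return (float)mean.val[0];   // ~0 = static/dark scene, up to 255 = very active
}

With this definition, mood rises with the amount and brightness of inter-frame change, which matches how both examples compare it against a spike threshold.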
Example #2
void testApp::update() {
	ofBackground( 100, 100, 100 );
	vidGrabber.grabFrame();

	if( vidGrabber.isFrameNew() ) {
        colorImg = vidGrabber.getPixels();

        colorImg.mirror( flipImageV, !flipImageH ); // Image must be flipped vertically - screen coordinate system.
        grayImg = colorImg;

        if( bLearnBakground )
		{
            bgImg = grayImg;
            bLearnBakground = false;
			//maskingChanges = true;
        }

        //grayImg.absDiff( maskedBgImage );

		// oldGrayImg is used to track the general "mood" (activity) of the scene.
		// At this point it still holds the previous frame, unmodified; after the
		// absDiff call below it holds the difference between that frame and the current one.
		oldGrayImg.absDiff(grayImg);

		// Calculate the general "mood" of the scene now.
		mood = averageBnWVal(oldGrayImg.getCvImage());

		// update oldGrayImg
		oldGrayImg = grayImg;

		// Report scene mood.
		if ((mood > moodSpike) || (currNonMoodFrame > nonMoodFrames))
		{
			char updateMSG[100];
			// snprintf instead of sprintf guards against overflowing updateMSG
			snprintf(updateMSG, sizeof(updateMSG), "%d|%d|%f|%f|%f|%f\n",
					USER_MOOD, SCENE_MOOD_USER, mood, 0.0, 0.0, 0.0);
			printf("%s", updateMSG);
			tcps.sendToAll(updateMSG);
			currNonMoodFrame = 0;
		} else
		{
			currNonMoodFrame++;
		}

		grayImg.absDiff(bgImg);
        grayImg.blur( 11 );
        grayImg.threshold( threshold );

        //findContours( img, minSize, maxSize, nMax, find inner contours yes/no )
        contourFinder.findContours( grayImg, blobSizeMin, blobSizeMax, maxUsers, false );
        blobTracker.trackBlobs( contourFinder.blobs );
    }
	
	for (int curClient = 0; curClient < tcps.getNumClients(); curClient++)
	{
		std::string request = tcps.receive(curClient);
		char data[100];
		if (request.length())
		{
			if (strncmp(request.c_str(), "PtBlob?",7) == 0)
			{
				// PtBlob collision test
				// Get the coordinates
				float x, y;
				strncpy(data, request.c_str() + 7, sizeof(data) - 1);
				data[sizeof(data) - 1] = '\0'; // strncpy does not guarantee null termination
				sscanf(data, "%f,%f", &x, &y);
				for (int curBlob = 0; curBlob < (int)blobTracker.blobs.size(); curBlob++)
				{
					// blobs is a vector indexed by position; getById() expects
					// a blob id, not a loop index, so index the vector directly
					ofxCvTrackedBlob blob = blobTracker.blobs[curBlob];
					// the point-in-blob test itself is missing here
					// (a possible completion is sketched after this listing)
				}
			}
			
		}
	}
}
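
The PtBlob? branch above parses x and y but the loop over tracked blobs never performs the collision test or answers the client. Below is a minimal sketch of how the loop body could be completed, assuming ofxCvTrackedBlob exposes an id and a boundingRect (as ofxCvBlob does in ofxOpenCv) and using ofxTCPServer::send() to reply; the "PtBlob!" reply format is invented for this sketch.

// Hypothetical completion: reply with the id of the first tracked blob
// whose bounding box contains the requested point.
for (int curBlob = 0; curBlob < (int)blobTracker.blobs.size(); curBlob++)
{
	ofxCvTrackedBlob blob = blobTracker.blobs[curBlob];
	ofRectangle& r = blob.boundingRect;
	if (x >= r.x && x <= r.x + r.width &&
	    y >= r.y && y <= r.y + r.height)
	{
		char reply[100];
		snprintf(reply, sizeof(reply), "PtBlob!%d\n", blob.id);
		tcps.send(curClient, reply);  // answer only the requesting client
		break;  // first hit is enough for a simple point test
	}
}

A bounding-box test is a coarse approximation; testing against the blob's contour points would be more precise but also more expensive per request.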