Example #1
//--------------------------------------------------------------
void testApp::update(){

    // grab a new frame from the camera
    vidGrabber.grabFrame();
    if(vidGrabber.isFrameNew()) {
        // copy the frame into the color image and mirror it horizontally
        colorImg.setFromPixels(vidGrabber.getPixels(), vidGrabber.getWidth(), vidGrabber.getHeight());
        colorImg.mirror(false, true);

        // convert to grayscale
        greyImage = colorImg;
        
        // downscale for faster Haar detection
        greyImageSmall.scaleIntoMe(greyImage);
        
        // run the Haar cascade detector on the small grayscale image
        haarFinder.findHaarObjects(greyImageSmall);
    }
    
}
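This update() assumes a handful of members declared and initialized elsewhere in the app. A minimal sketch of what testApp.h and testApp::setup() might contain is given below; the camera size, downscaled image size, and cascade file name are assumptions, not taken from the original.

// testApp.h (assumed member declarations)
ofVideoGrabber      vidGrabber;      // camera input
ofxCvColorImage     colorImg;        // full-size color frame
ofxCvGrayscaleImage greyImage;       // grayscale copy of the frame
ofxCvGrayscaleImage greyImageSmall;  // downscaled copy for faster detection
ofxCvHaarFinder     haarFinder;      // Haar cascade detector

// testApp::setup() (assumed initialization)
vidGrabber.initGrabber(320, 240);
colorImg.allocate(320, 240);
greyImage.allocate(320, 240);
greyImageSmall.allocate(120, 90);
haarFinder.setup("haarcascade_frontalface_default.xml");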
Example #2
//--------------------------------------------------------------
void testApp::update(){
	
	
	/************ UPDATE BALL ***********************/

	//Update ball position
	ballPositionX += ballVelocityX;
	ballPositionY += ballVelocityY;
	
	if(ballPositionX < 0 || ballPositionX > ofGetWidth()) {
		ballVelocityX *= -1;
	}
	
	if (ballPositionY < 0 || ballPositionY > ofGetHeight()) {
		ballVelocityY *= -1;
	}
	
	/************ UPDATE KINECT ***********************/

	kinect.update();
	
	// get color pixels
	colorImageRGB			= kinect.getPixels();
	
	// get depth pixels
	depthOrig = kinect.getDepthPixels();
	
	// save original depth, and do some preprocessing
	depthProcessed = depthOrig;
	if(invert) depthProcessed.invert();
	if(mirror) {
		depthOrig.mirror(false, true);
		depthProcessed.mirror(false, true);
		colorImageRGB.mirror(false, true);
	}
	if(preBlur) cvSmooth(depthProcessed.getCvImage(), depthProcessed.getCvImage(), CV_BLUR, preBlur*2+1);
	if(topThreshold) cvThreshold(depthProcessed.getCvImage(), depthProcessed.getCvImage(), topThreshold * 255, 255, CV_THRESH_TRUNC);
	if(bottomThreshold) cvThreshold(depthProcessed.getCvImage(), depthProcessed.getCvImage(), bottomThreshold * 255, 255, CV_THRESH_TOZERO);
	if(dilateBeforeErode) {
		if(dilateAmount) cvDilate(depthProcessed.getCvImage(), depthProcessed.getCvImage(), 0, dilateAmount);
		if(erodeAmount) cvErode(depthProcessed.getCvImage(), depthProcessed.getCvImage(), 0, erodeAmount);
	} else {
		if(erodeAmount) cvErode(depthProcessed.getCvImage(), depthProcessed.getCvImage(), 0, erodeAmount);
		if(dilateAmount) cvDilate(depthProcessed.getCvImage(), depthProcessed.getCvImage(), 0, dilateAmount);
	}
	depthProcessed.flagImageChanged();
	
	// find contours
	depthContours.findContours(depthProcessed,
							   minBlobSize * minBlobSize * depthProcessed.getWidth() * depthProcessed.getHeight(),
							   maxBlobSize * maxBlobSize * depthProcessed.getWidth() * depthProcessed.getHeight(),
							   maxNumBlobs, findHoles, useApproximation);
	
	
	// Clear old attraction points (delete the heap-allocated points first so they are not leaked each frame)
	for (int i=0; i<attractPts.size(); i++) {
		delete attractPts[i];
	}
	attractPts.clear();
	
	// Add an attraction point at the centroid of each detected blob
	for (int i=0; i<depthContours.blobs.size(); i++) {
		attractPts.push_back(new ofPoint(depthContours.blobs[i].centroid));
		//printf("Blob %d: %f %f \n", i, depthContours.blobs[i].centroid.x, depthContours.blobs[i].centroid.y);
	}
	
	// if exactly one blob was found, find the nearest point (taken as the brightest depth pixel) within its bounding box
	static ofxVec3f newPoint;	// static so the last point persists on frames where no blob is found
	if(depthContours.blobs.size() == 1) {
		ofxCvBlob &blob = depthContours.blobs[0];
		
		depthOrig.setROI(blob.boundingRect);
		
		double minValue, maxValue;
		CvPoint minLoc, maxLoc;
		cvMinMaxLoc(depthOrig.getCvImage(), &minValue, &maxValue, &minLoc, &maxLoc, NULL);
		
		depthOrig.resetROI();
		
		newPoint.x = maxLoc.x + blob.boundingRect.x;
		newPoint.y = maxLoc.y + blob.boundingRect.y;
		//		newPoint.z = (maxValue + offset) * depthScale;	// read from depth map
		//printf("Min: %f %f Max: %f %f \n", minLoc.x, minLoc.y, maxLoc.x, maxLoc.y);
		
		// read directly from distance (in cm)
		// this doesn't seem to work, need to look into it
		newPoint.z = (kinect.getDistanceAt(newPoint) + depthOffset) * depthScale;
		
		// apply kalman filtering
		if(doKalman) {
			newPoint.x = updateKalman(0, newPoint.x);
			newPoint.y = updateKalman(1, newPoint.y);
			newPoint.z = updateKalman(2, newPoint.z);
		}

	} else {
		clearKalman(0);
		clearKalman(1);
		clearKalman(2);
	}
	
	// advance the ring-buffer head, ease the current point toward the new target, and store it
	pointHead = (pointHead + 1) % kNumPoints;
	curPoint += (newPoint - curPoint) * lerpSpeed;
	points[pointHead] = curPoint;
	
}
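Example #2 leans on a larger set of members and tuning parameters. The sketch below lists the declarations the code above appears to assume; the types, buffer size, allocation sizes, and Kalman helper signatures are inferred from their use and are assumptions rather than part of the original.

// testApp.h (assumed member declarations, inferred from the code above)
#define kNumPoints 10                    // length of the point history (assumed value)

ofxKinect           kinect;              // Kinect camera
ofxCvColorImage     colorImageRGB;       // RGB frame from the Kinect
ofxCvGrayscaleImage depthOrig;           // raw depth image
ofxCvGrayscaleImage depthProcessed;      // preprocessed copy of the depth image
ofxCvContourFinder  depthContours;       // blob finder run on the processed depth

vector<ofPoint*>    attractPts;          // per-frame attraction points (blob centroids)
ofxVec3f            curPoint;            // smoothed tracking point
ofxVec3f            points[kNumPoints];  // ring buffer of recent points
int                 pointHead;           // write index into the ring buffer

float ballPositionX, ballPositionY;      // ball state
float ballVelocityX, ballVelocityY;

// tuning parameters (typically exposed through a GUI)
bool  invert, mirror, findHoles, useApproximation, dilateBeforeErode, doKalman;
int   preBlur, erodeAmount, dilateAmount, maxNumBlobs;
float topThreshold, bottomThreshold, minBlobSize, maxBlobSize;
float depthOffset, depthScale, lerpSpeed;

// Kalman helpers (signatures inferred from their use above)
float updateKalman(int index, float value);
void  clearKalman(int index);

// testApp::setup() (assumed initialization)
kinect.init();
kinect.open();
colorImageRGB.allocate(640, 480);
depthOrig.allocate(640, 480);
depthProcessed.allocate(640, 480);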