Example No. 1
void handDetector::findFeatures(ofxCvGrayscaleImage grayImage, unsigned char * pixels) {
	// Constrain to ROI so we ignore extraneous data
	grayImage.setROI(roiRect);
	
	// Look for up to 4 blobs between BLOB_SIZE and 1/8 of the full image area,
	// including holes (open hands show up as holes in the thresholded image)
	contourFinder.findContours(grayImage, BLOB_SIZE, (grayImage.width * grayImage.height)/8, 4, true);
	
	numHoles = 0;
	numHands = 0;
	averageHandHeight = 0;
	
	for(int i = 0; i < contourFinder.nBlobs; i++) {
				
		// blob.hole is true when we have a hand (not a pinch)
		if(contourFinder.blobs[i].hole){
			if (numHands < NUMBER_OF_HANDS) {
				handBlobs[numHands].myBlob = contourFinder.blobs[i];
				//findFingers(handBlobs[numHands]);
				
				// Can only call one of the functions below as they both set averagePt
				//averageFinger(handBlobs[numHands]);
				findHandPoint(handBlobs[numHands]);
				
				numHands++;
			}
		}
		else {
			// a solid (non-hole) blob indicates a pinch
			numHoles++;
		}
	}
		
	// Any solid blob marks every tracked hand as pinching
	bool pinching = (numHoles > 0);
	for (int j = 0; j < numHands; j++) {
		handBlobs[j].pinch = pinching;
	}
		
	// Sample the raw depth under each hand's average point; averagePt is
	// relative to the ROI, so offset back into full-image coordinates
	for (int h = 0; h < numHands; h++) {
		averageHandHeight += pixels[(int)((handBlobs[h].averagePt.x + roiRect.x) + (handBlobs[h].averagePt.y + roiRect.y) * grayImage.width)];
	}
	
	// Guard against dividing by zero when no hands were found
	if (numHands > 0) {
		averageHandHeight /= numHands;
	}
}
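
For context, findFeatures() would typically be driven once per frame from the app's update loop. The sketch below is a hypothetical call site, not part of the original source: the HandApp class and its kinect, depthImage, and detector members are assumptions for illustration. The key point is that the grayscale image and the raw pixel buffer passed in must come from the same frame, since the function samples hand heights straight out of that buffer.

// Hypothetical per-frame driver (kinect, depthImage, and detector are assumed members)
void HandApp::update() {
	kinect.update();
	
	// Build the grayscale image from the same buffer handed to the detector,
	// because findFeatures() reads depth values directly out of 'pixels'
	unsigned char * depthPixels = kinect.getDepthPixels();
	depthImage.setFromPixels(depthPixels, kinect.getWidth(), kinect.getHeight());
	
	detector.findFeatures(depthImage, depthPixels);
}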
Example No. 2
//--------------------------------------------------------------
void testApp::update(){
	
	
	/************ UPDATE BALL ***********************/

	// Move the ball and bounce it off the window edges
	ballPositionX += ballVelocityX;
	ballPositionY += ballVelocityY;
	
	if(ballPositionX < 0 || ballPositionX > ofGetWidth()) {
		ballVelocityX *= -1;
	}
	
	if (ballPositionY < 0 || ballPositionY > ofGetHeight()) {
		ballVelocityY *= -1;
	}
	
	/************ UPDATE KINECT ***********************/

	kinect.update();
	
	// get color pixels
	colorImageRGB			= kinect.getPixels();
	
	// get depth pixels
	depthOrig = kinect.getDepthPixels();
	
	// save original depth, and do some preprocessing
	depthProcessed = depthOrig;
	if(invert) depthProcessed.invert();
	if(mirror) {
		depthOrig.mirror(false, true);
		depthProcessed.mirror(false, true);
		colorImageRGB.mirror(false, true);
	}
	if(preBlur) cvSmooth(depthProcessed.getCvImage(), depthProcessed.getCvImage(), CV_BLUR, preBlur*2+1);	// box blur; 2n+1 keeps the kernel size odd
	if(topThreshold) cvThreshold(depthProcessed.getCvImage(), depthProcessed.getCvImage(), topThreshold * 255, 255, CV_THRESH_TRUNC);	// clamp values above topThreshold
	if(bottomThreshold) cvThreshold(depthProcessed.getCvImage(), depthProcessed.getCvImage(), bottomThreshold * 255, 255, CV_THRESH_TOZERO);	// zero values at or below bottomThreshold
	// Dilate-then-erode (closing) fills small holes; erode-then-dilate (opening) removes speckle
	if(dilateBeforeErode) {
		if(dilateAmount) cvDilate(depthProcessed.getCvImage(), depthProcessed.getCvImage(), 0, dilateAmount);
		if(erodeAmount) cvErode(depthProcessed.getCvImage(), depthProcessed.getCvImage(), 0, erodeAmount);
	} else {
		if(erodeAmount) cvErode(depthProcessed.getCvImage(), depthProcessed.getCvImage(), 0, erodeAmount);
		if(dilateAmount) cvDilate(depthProcessed.getCvImage(), depthProcessed.getCvImage(), 0, dilateAmount);
	}
	depthProcessed.flagImageChanged();
	
	// find contours; minBlobSize/maxBlobSize are normalized (0-1) sizes,
	// squared and scaled by the image area to get pixel areas
	depthContours.findContours(depthProcessed,
							   minBlobSize * minBlobSize * depthProcessed.getWidth() * depthProcessed.getHeight(),
							   maxBlobSize * maxBlobSize * depthProcessed.getWidth() * depthProcessed.getHeight(),
							   maxNumBlobs, findHoles, useApproximation);
	
	
	// Clear old attraction points, deleting them first since they are heap-allocated
	for (int i = 0; i < attractPts.size(); i++) {
		delete attractPts[i];
	}
	attractPts.clear();
	
	// Add an attraction point at the centroid of each blob
	for (int i=0; i<depthContours.blobs.size(); i++) {
		
		attractPts.push_back(new ofPoint(depthContours.blobs[i].centroid));
		//printf("Blob %d: %f %f \n", i, depthContours.blobs[i].centroid.x, depthContours.blobs[i].centroid.y);
	}
	
	// if exactly one blob was found, find the nearest point inside it:
	// brighter depth pixels are closer to the camera, so take the maximum
	static ofxVec3f newPoint;
	if(depthContours.blobs.size() == 1) {
		ofxCvBlob &blob = depthContours.blobs[0];
		
		depthOrig.setROI(blob.boundingRect);
		
		double minValue, maxValue;
		CvPoint minLoc, maxLoc;
		cvMinMaxLoc(depthOrig.getCvImage(), &minValue, &maxValue, &minLoc, &maxLoc, NULL);
		
		depthOrig.resetROI();
		
		newPoint.x = maxLoc.x + blob.boundingRect.x;
		newPoint.y = maxLoc.y + blob.boundingRect.y;
		//		newPoint.z = (maxValue + offset) * depthScale;	// read from depth map
		//printf("Min: %f %f Max: %f %f \n", minLoc.x, minLoc.y, maxLoc.x, maxLoc.y);
		
		// read directly from distance (in cm)
		// this doesn't seem to work, need to look into it
		newPoint.z = (kinect.getDistanceAt(newPoint) + depthOffset) * depthScale;
		
		// apply kalman filtering
		if(doKalman) {
			newPoint.x = updateKalman(0, newPoint.x);
			newPoint.y = updateKalman(1, newPoint.y);
			newPoint.z = updateKalman(2, newPoint.z);
		}

	} else {
		clearKalman(0);
		clearKalman(1);
		clearKalman(2);
	}
	
	// Ease the current point toward the target and record it in the circular trail buffer
	pointHead = (pointHead + 1) % kNumPoints;
	curPoint += (newPoint - curPoint) * lerpSpeed;
	points[pointHead] = curPoint;
	
}
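
The updateKalman() and clearKalman() helpers called above are defined elsewhere in this project and are not shown here. As a minimal sketch only, assuming an independent one-dimensional filter per axis under a random-walk motion model (the original implementation and its noise constants may well differ), they could look like this:

// One scalar Kalman filter per axis (0 = x, 1 = y, 2 = z).
// kProcessNoise and kMeasurementNoise are illustrative values, not tuned ones.
const int kNumKalman = 3;
static float kalmanX[kNumKalman];		// current state estimate
static float kalmanP[kNumKalman];		// current estimate variance
static bool  kalmanReady[kNumKalman];	// whether the filter has been seeded

const float kProcessNoise     = 0.01f;	// Q: how fast the target can move
const float kMeasurementNoise = 0.1f;	// R: how noisy the depth readings are

float updateKalman(int i, float measurement) {
	if (!kalmanReady[i]) {				// first sample after a reset: adopt it directly
		kalmanX[i] = measurement;
		kalmanP[i] = 1.0f;
		kalmanReady[i] = true;
		return kalmanX[i];
	}
	kalmanP[i] += kProcessNoise;								// predict
	float k = kalmanP[i] / (kalmanP[i] + kMeasurementNoise);	// Kalman gain
	kalmanX[i] += k * (measurement - kalmanX[i]);				// correct
	kalmanP[i] *= (1.0f - k);
	return kalmanX[i];
}

void clearKalman(int i) {
	kalmanReady[i] = false;	// next sample re-seeds the filter
}

Resetting the filters whenever tracking is lost (the else branch above) keeps the estimate from dragging in from a stale position once a blob reappears.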