Example #1
 //--------------------------------------------------------------
 ofPixelsRef OpenNIProcessor::process ( ofBaseImage & image ){
     openNIDevice.update();
     
     // update TSPS people
     int nUsers = openNIDevice.getNumTrackedUsers();
     for (int i=0; i<nUsers; i++){
         ofxOpenNIUser & user = openNIDevice.getTrackedUser(i);
         OpenNIPerson * person = (OpenNIPerson *) getTrackedPerson( user.getXnID() );
         
         // update person
         if ( person != NULL ){
             // guard against an empty mask, which can crash setFromPixels
             if ( user.getMaskPixels().size() > 0 ){
                 blobImage.setFromPixels(user.getMaskPixels());
             }
             int numBlobs = contourFinder.findContours( blobImage, minBlobArea, maxBlobArea, 1, bFindHoles );
             if (numBlobs > 0 ){
                 person->updateCentroid(contourFinder.blobs[0].centroid, true);
                 person->updateBoundingRect(contourFinder.blobs[0].boundingRect);
                 person->updateContour(contourFinder.blobs[0].pts );            
             }
             person->update( user );
             
             EventArgs args;
             args.person = person;
             args.scene  = scene;
             ofNotifyEvent( Events().personUpdated, args, this );
         }
     }
     
     grayPixels = openNIDevice.getDepthPixels().getChannel(0);
     return grayPixels;
 }
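A minimal listener sketch for the events fired in process() above. It is not part of the examples: it assumes Events() is reachable from the application and that personUpdated is an ofEvent<EventArgs>, as the ofNotifyEvent calls imply; the class and method names are hypothetical.

 //--------------------------------------------------------------
 // Hypothetical consumer of the personUpdated event fired in process().
 class PersonLogger {
 public:
     void setup(){
         // assumes Events().personUpdated is an ofEvent<EventArgs>
         ofAddListener( Events().personUpdated, this, &PersonLogger::onPersonUpdated );
     }
     void onPersonUpdated( EventArgs & args ){
         if ( args.person != NULL ){
             ofLogNotice("PersonLogger") << "updated person " << args.person->pid;
         }
     }
 };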
Example #2
 //--------------------------------------------------------------
 void OpenNIProcessor::onUserEvent( ofxOpenNIUserEvent & event ){   
     OpenNIPerson * person = NULL;
     
     switch (event.userStatus) {            
         case USER_TRACKING_STARTED: {
             person = new OpenNIPerson( event.id, trackedPeople->size() );
             trackedPeople->push_back(person);
             
             // braces scope 'args' to this case so later labels cannot jump past its initialization
             EventArgs args;
             args.person = person;
             args.scene  = scene;
             ofNotifyEvent( Events().personEntered, args, this );
             break;
         }
             
         case USER_TRACKING_STOPPED:
             person = (OpenNIPerson*) getTrackedPerson( event.id );
             if ( person != NULL){
                 //delete the object and remove it from the vector
                 std::vector<Person*>::iterator it;
                 for(it = trackedPeople->begin(); it != trackedPeople->end(); it++){
                     if((*it)->pid == person->pid){
                         trackedPeople->erase(it);
                         break;
                     }
                 }
                 
                 EventArgs args;
                 args.person = person;
                 args.scene  = scene;
                 ofNotifyEvent( Events().personWillLeave, args, this );
                 
                 // delete pointer
                 delete person;
                 return;
             }
             openNIDevice.resetUserTracking(event.id);
             break;
             
         case USER_CALIBRATION_STARTED:
             break;
             
         case USER_CALIBRATION_STOPPED:
             break;
             
         case USER_SKELETON_LOST:
             break;    
             
         case USER_SKELETON_FOUND:
             break;
             
         default:
             break;
     }
 }
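The USER_TRACKING_STOPPED branch above (and blobOff further down) removes a person by scanning the tracked-people vector for a matching pid. The helper below is only an illustrative sketch of the same lookup written with std::find_if; it is not part of TSPS and assumes a C++11 compiler and that Person exposes an integer pid, as the code above suggests.

//---------------------------------------------------------------------------
// Illustrative helper, not part of TSPS: erase-by-pid via std::find_if.
#include <algorithm>
#include <vector>

static void removePersonByPid( std::vector<Person*> & people, int pid ){
    std::vector<Person*>::iterator it = std::find_if( people.begin(), people.end(),
        [pid]( Person * p ){ return p != NULL && p->pid == pid; } );
    if ( it != people.end() ){
        delete *it;          // free the Person
        people.erase( it );  // then drop the dangling pointer from the vector
    }
}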
//---------------------------------------------------------------------------
void ofxTSPSPeopleTracker::blobOff( int x, int y, int id, int order )
{
	ofxTSPSPerson* p = getTrackedPerson(id);
	//ensure we are tracking
	if(NULL == p){
		ofLog(OF_LOG_WARNING, "ofxPerson::warning. encountered persistent blob without a person behind them\n");		
		return;
	}
	
	//alert the delegate
	if(eventListener != NULL){
		eventListener->personWillLeave(p, &scene);
	}
    
    ofPoint centroid = p->getCentroidNormalized(width, height);
	if (bTuioEnabled) {
		tuioClient.cursorReleased(centroid.x, centroid.y, order);	
	}
	//send osc kill message if enabled
	if (bOscEnabled){
		oscClient.personWillLeave(p, centroid, width, height, p_Settings->bSendOscContours);
	}
	
	//send tcp kill message if enabled
	if(bTcpEnabled){
		tcpClient.personWillLeave(p, centroid, width, height, p_Settings->bSendOscContours);
	}
    
	if(bWebSocketsEnabled){
		webSocketServer.personWillLeave(p, centroid, width, height, p_Settings->bSendOscContours);
	}
	
	//delete the object and remove it from the vector
	std::vector<ofxTSPSPerson*>::iterator it;
	for(it = trackedPeople.begin(); it != trackedPeople.end(); it++){
		if((*it)->pid == p->pid){
			trackedPeople.erase(it);
			delete p;
			break;
		}
	}
}
//---------------------------------------------------------------------------
void ofxTSPSPeopleTracker::trackPeople()
{	
	//-------------------
	//QUAD WARPING
	//-------------------
		
	//warp background
    //grayImageWarped = grayImage;
    colorImage = grayImage;
    colorImageWarped = colorImage;
    //getQuadSubImage(&colorImage, &colorImageWarped, &p_Settings->quadWarpScaled, 3);
    getQuadSubImage(&grayImage, &grayImageWarped, &p_Settings->quadWarpScaled, 1);
    //grayImageWarped.warpIntoMe(grayImage, p_Settings->quadWarpScaled,     p_Settings->quadWarpOriginal);
	//colorImageWarped.warpIntoMe(colorImage, p_Settings->quadWarpScaled, p_Settings->quadWarpOriginal);	
	
	graySmallImage.scaleIntoMe(grayImageWarped);
	grayBabyImage.scaleIntoMe(grayImageWarped);
	
	grayDiff = grayImageWarped;
	
	//amplify (see cpuimagefilter class)
	if(p_Settings->bAmplify){
		grayDiff.amplify(grayDiff, p_Settings->highpassAmp/15.0f);
	}
	
	grayImageWarped = grayDiff;
	
	//-------------------
	//BACKGROUND
	//-------------------
	
	//force learn background if there are > 5 blobs (off by default)
	//JG Disabling this feature for now, 
	//I think it's a great idea but it needs to be better described and "5" needs to be customizable
//	if (p_Settings->bSmartLearnBackground == true && contourFinder.nBlobs > 5){
//		p_Settings->bLearnBackground = true;
//	}
	
	//learn background (either in reset or additive)
	if (p_Settings->bLearnBackground){
		cout << "Learning Background" << endl;
		grayBg = grayImageWarped;
	}
	
	//progressive relearn background
	if (p_Settings->bLearnBackgroundProgressive){
		if (p_Settings->bLearnBackground) floatBgImg = grayBg;
		floatBgImg.addWeighted( grayImageWarped, p_Settings->fLearnRate * .00001);
		grayBg = floatBgImg;
		//cvConvertScale( floatBgImg.getCvImage(), grayBg.getCvImage(), 255.0f/65535.0f, 0 );       
		//grayBg.flagImageChanged();			
	}
	
	//printf("track type %d from (%d,%d,%d)\n", p_Settings->trackType, TRACK_ABSOLUTE, TRACK_DARK, TRACK_LIGHT);
	if(p_Settings->trackType == TRACK_ABSOLUTE){
		grayDiff.absDiff(grayBg, grayImageWarped);
	}
	else{
		grayDiff = grayImageWarped;
		if(p_Settings->trackType == TRACK_LIGHT){
			//grayDiff = grayBg - grayImageWarped;
			cvSub(grayBg.getCvImage(), grayDiff.getCvImage(), grayDiff.getCvImage());
		}
		else if(p_Settings->trackType == TRACK_DARK){ 
			cvSub(grayDiff.getCvImage(), grayBg.getCvImage(), grayDiff.getCvImage());
			//grayDiff = grayImageWarped - grayBg;
		}
		grayDiff.flagImageChanged();
	}
	
	//-----------------------
	// IMAGE TREATMENT
	//-----------------------
	if(p_Settings->bSmooth){
		grayDiff.blur((p_Settings->smooth * 2) + 1); //needs to be an odd number
	}
	
	//highpass filter (see cpuimagefilter class)	
	if(p_Settings->bHighpass){
		grayDiff.highpass(p_Settings->highpassBlur, p_Settings->highpassNoise);
	}
	
	//threshold	
	grayDiff.threshold(p_Settings->threshold);
	
	//-----------------------
	// TRACKING
	//-----------------------	
	//find the optical flow
	if (p_Settings->bTrackOpticalFlow){
		opticalFlow.calc(grayLastImage, graySmallImage, 11);
	}
	
	//accumulate and store all found haar features.
	vector<ofRectangle> haarRects;
	if(p_Settings->bDetectHaar){
		haarTracker.findHaarObjects( grayBabyImage );
		float x, y, w, h;
		while(haarTracker.hasNextHaarItem()){
			haarTracker.getHaarItemPropertiesEased( &x, &y, &w, &h );
			haarRects.push_back( ofRectangle(x,y,w,h) );
		}
	}
	
	char printString[1024];
	sprintf(printString, "found %i haar items this frame", (int) haarRects.size());
	ofLog(OF_LOG_VERBOSE, printString);
	
	contourFinder.findContours(grayDiff, p_Settings->minBlob*width*height, p_Settings->maxBlob*width*height, 50, p_Settings->bFindHoles);
	persistentTracker.trackBlobs(contourFinder.blobs);
		
	// By setting maxVector and minVector outside the following for-loop,
	// blobs do NOT have to be detected before optical flow can begin working.
	if(p_Settings->bTrackOpticalFlow) {
        scene.averageMotion = opticalFlow.flowInRegion(0,0,width,height);
        scene.percentCovered = 0; 
        opticalFlow.maxVector = p_Settings->maxOpticalFlow;
		opticalFlow.minVector = p_Settings->minOpticalFlow;
	}
	
	for(int i = 0; i < persistentTracker.blobs.size(); i++){
		ofxCvTrackedBlob blob = persistentTracker.blobs[i];
		ofxTSPSPerson* p = getTrackedPerson(blob.id);
		//somehow we are not tracking this person, safeguard (shouldn't happen)
		if(NULL == p){
			ofLog(OF_LOG_WARNING, "ofxPerson::warning. encountered persistent blob without a person behind them\n");
			continue;
		}
		
		scene.percentCovered += blob.area;
		
		//update this person with new blob info
		p->update(blob, p_Settings->bCentroidDampen);

		//normalize simple contour
		for (int j=0; j<p->simpleContour.size(); j++){
			p->simpleContour[j].x /= width;
			p->simpleContour[j].y /= height;
		}
        
		ofRectangle roi;
		roi.x		= fmax( (p->boundingRect.x - p_Settings->haarAreaPadding) * TRACKING_SCALE_FACTOR, 0.0f );
		roi.y		= fmax( (p->boundingRect.y - p_Settings->haarAreaPadding) * TRACKING_SCALE_FACTOR, 0.0f );
		roi.width	= fmin( (p->boundingRect.width  + p_Settings->haarAreaPadding*2) * TRACKING_SCALE_FACTOR, grayBabyImage.width - roi.x );
		roi.height	= fmin( (p->boundingRect.height + p_Settings->haarAreaPadding*2) * TRACKING_SCALE_FACTOR, grayBabyImage.height - roi.y );
		
		//sum optical flow for the person
		if(p_Settings->bTrackOpticalFlow){
			p->opticalFlowVectorAccumulation = opticalFlow.flowInRegion(roi);
		}
		
		//detect haar patterns (faces, eyes, etc)
		if (p_Settings->bDetectHaar){
			bool bHaarItemSet = false;
				
			//find the region of interest, expanded by haarArea.
			//bound by the frame edge
			//cout << "ROI is " << roi.x << "  " << roi.y << " " << roi.width << "  " << roi.height << endl;
			bool haarThisFrame = false;
			for(int j = 0; j < haarRects.size(); j++){
				ofRectangle hr = haarRects[j];
				//check to see if the haar is contained within the bounding rectangle
				if(hr.x > roi.x && hr.y > roi.y && hr.x+hr.width < roi.x+roi.width && hr.y+hr.height < roi.y+roi.height){
					hr.x /= TRACKING_SCALE_FACTOR;
					hr.y /= TRACKING_SCALE_FACTOR;
					hr.width /= TRACKING_SCALE_FACTOR;
					hr.height /= TRACKING_SCALE_FACTOR;
					p->setHaarRect(hr);
					haarThisFrame = true;
					break;
				}
			}
			if(!haarThisFrame){
				p->noHaarThisFrame();
			}
			/*
			 //JG 1/28/2010
			 //This is the proper way to do the Haar, checking one person at a time.
			 //however this discards the robustness of the haarFinder and 
			 //makes the whole operation really spotty.  
			 // The solution is to put more energy into finding out how 
			 // the haar tracker works to get robust/persistent haar items over time.
			 //for now we just check the whole screen and see if the haar is contained
			grayBabyImage.setROI(roi.x, roi.y, roi.width, roi.height);
			int numFound = haarFinder.findHaarObjects(grayBabyImage, roi);
			//cout << "found " << numFound << " for this object" << endl;
			if(numFound > 0) {
				ofRectangle haarRect = haarFinder.blobs[0].boundingRect;
				haarRect.x /= TRACKING_SCALE_FACTOR;
				haarRect.y /= TRACKING_SCALE_FACTOR;
				haarRect.width /= TRACKING_SCALE_FACTOR;
				haarRect.height /= TRACKING_SCALE_FACTOR;
				p->setHaarRect(haarRect);
			}
			else {
				p->noHaarThisFrame();
			}
			 */
		}
		
		if(eventListener != NULL){
			if( p->velocity.x != 0 || p->velocity.y != 0){
				eventListener->personMoved(p, &scene);
			}
			eventListener->personUpdated(p, &scene);
		}
	}
	
	//normalize it
	scene.percentCovered /= width*height;
	
	//-----------------------
	// VIEWS
	//-----------------------	
    
	//store the old image
	grayLastImage = graySmallImage;
	
	//update views
	
	cameraView.update(colorImage);
	if (p_Settings->bAdjustedViewInColor)
		adjustedView.update(colorImageWarped);
	else
		adjustedView.update(grayImageWarped);
	bgView.update(grayBg);
	processedView.update(grayDiff);
    
	//-----------------------
	// COMMUNICATION
	//-----------------------	

    for (int i = 0; i < trackedPeople.size(); i++){
        ofxTSPSPerson* p = trackedPeople[i];
        ofPoint centroid = p->getCentroidNormalized(width, height);
//			if(p_Settings->bUseHaarAsCenter && p->hasHaarRect()){
        
        if (bTuioEnabled){
            ofPoint tuioCursor = p->getCentroidNormalized(width, height);
            tuioClient.cursorDragged( tuioCursor.x, tuioCursor.y, p->oid);
        }
        
        if (bOscEnabled){
            if( p->velocity.x != 0 || p->velocity.y != 0){
				//DEPRECATED:
                oscClient.personMoved(p, centroid, width, height, p_Settings->bSendOscContours);
            }
            oscClient.personUpdated(p, centroid, width, height, p_Settings->bSendOscContours);
        }
        
        if (bTcpEnabled){
            tcpClient.personMoved(p, centroid, width, height, p_Settings->bSendOscContours);
        }
        
        if (bWebSocketsEnabled){
            webSocketServer.personMoved(p, centroid, width, height, p_Settings->bSendOscContours);
        }
    }
    
	if(bTuioEnabled){
		tuioClient.update();		
	}
	
	if (bOscEnabled){
		oscClient.ip = p_Settings->oscHost;
		oscClient.port = p_Settings->oscPort;
		oscClient.useLegacy = p_Settings->bUseLegacyOsc;
		oscClient.update();
	}
    
	if (bTcpEnabled){
		tcpClient.port = p_Settings->oscPort;
		tcpClient.update();
		tcpClient.send();
	}
    
    if (bWebSocketsEnabled){
        if (p_Settings->webSocketPort != webSocketServer.getPort()){
            webSocketServer.close();
            webSocketServer.setup( p_Settings->webSocketPort );
        }
        //sent automagically
        webSocketServer.send();
    }
}
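Condensed for reference, the core of trackPeople() above is a classic ofxOpenCv background-differencing pipeline: blend a little of the current frame into a float background, take the absolute difference, threshold, and find contours. The sketch below is only an outline under those assumptions; the images are presumed pre-allocated to the same size, and the function and parameter names are illustrative, not TSPS members.

//---------------------------------------------------------------------------
// Illustrative outline of the differencing pipeline used in trackPeople().
void backgroundDiffAndFindBlobs( ofxCvGrayscaleImage & current,
                                 ofxCvFloatImage & backgroundFloat,
                                 ofxCvGrayscaleImage & background,
                                 ofxCvGrayscaleImage & diff,
                                 ofxCvContourFinder & finder,
                                 float learnRate, int thresholdValue ){
	// progressive relearn: blend a little of the current frame into the background
	backgroundFloat.addWeighted( current, learnRate );
	background = backgroundFloat;
	
	// absolute difference against the background, then binarize
	diff.absDiff( background, current );
	diff.threshold( thresholdValue );
	
	// blobs between 1% and 50% of the frame area, up to 50 considered, no holes
	float w = current.getWidth(), h = current.getHeight();
	finder.findContours( diff, 0.01f*w*h, 0.50f*w*h, 50, false );
}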
Example #5
 //------------------------------------------------------------------------
 ofPixelsRef CvProcessor::process ( ofBaseImage & image ){
     if ( bTrackHaar ){
         processHaar( cameraBabyImage );
     }
     
     if ( bTrackOpticalFlow ){
         processOpticalFlow( cameraSmallImage );
     }
     
     differencedImage.setFromPixels(image.getPixelsRef());
     ofxCv::threshold(differencedImage, threshold);
     
     // find contours
     contourFinder.setFindHoles( bFindHoles );
     contourFinder.setMinArea( minBlobArea * tspsWidth * tspsHeight );
     contourFinder.setMaxArea( maxBlobArea * tspsWidth * tspsHeight );
     contourFinder.findContours( differencedImage );
     
     // update people
     RectTracker& rectTracker    = contourFinder.getTracker();
     cv::Mat cameraMat           = toCv(cameraImage);
     
     //optical flow scale
     // float flowROIScale = tspsWidth/flow.getWidth();
     
     for(int i = 0; i < contourFinder.size(); i++){
         unsigned int id = contourFinder.getLabel(i);
         if(rectTracker.existsPrevious(id)) {
             CvPerson* p = (CvPerson *) getTrackedPerson(id);
             //somehow we are not tracking this person, safeguard (shouldn't happen)
             if(NULL == p){
                 ofLog(OF_LOG_WARNING, "Person::warning. encountered persistent blob without a person behind them\n");
                 continue;
             }
             p->oid = i; //hack ;(
             
             //update this person with new blob info
             // to-do: make centroid dampening dynamic
             p->update(true);
             
             
             //normalize simple contour
             for (int j=0; j<p->simpleContour.size(); j++){
                 p->simpleContour[j].x /= tspsWidth;
                 p->simpleContour[j].y /= tspsHeight;
             }
             
             //find peak in blob (only useful with depth cameras)
             cv::Point minLoc, maxLoc;
             double minVal = 0, maxVal = 0;
             cv::Rect rect;
             rect.x      = p->boundingRect.x;
             rect.y      = p->boundingRect.y;
             rect.width  = p->boundingRect.width;
             rect.height = p->boundingRect.height;
             cv::Mat roiMat(cameraMat, rect);
             cv::minMaxLoc( roiMat, &minVal, &maxVal, &minLoc, &maxLoc, cv::Mat());
             
             // set depth
             p->depth = p->highest.z / 255.0f;
             
             // set highest and lowest points: x, y, VALUE stored in .z prop
             // ease vals unless first time you're setting them
             if ( p->highest.x == -1 ){
                 p->highest.set(  p->boundingRect.x + maxLoc.x,  p->boundingRect.y + maxLoc.y, maxVal);
                 p->lowest.set(  p->boundingRect.x + minLoc.x,  p->boundingRect.y + minLoc.y, minVal);
             } else {
                 p->highest.x = ( p->highest.x * .9 ) + ( p->boundingRect.x + maxLoc.x ) * .1;
                 p->highest.y = ( p->highest.y * .9 ) + ( p->boundingRect.y + maxLoc.y ) * .1;
                 p->highest.z = ( p->highest.z * .9) + ( maxVal ) * .1;
                 p->lowest.x = ( p->lowest.x * .9 ) + ( p->boundingRect.x + minLoc.x ) * .1;
                 p->lowest.y = ( p->lowest.y * .9 ) + ( p->boundingRect.y + minLoc.y ) * .1;
                 p->lowest.z = ( p->lowest.z * .9) + ( minVal ) * .1;
             }
             
             // cap highest + lowest
             p->highest.x = (p->highest.x > tspsWidth ? tspsWidth : p->highest.x);
             p->highest.x = (p->highest.x < 0 ? 0 : p->highest.x);
             p->highest.y = (p->highest.y > tspsHeight ? tspsHeight : p->highest.y);
             p->highest.y = (p->highest.y < 0 ? 0 : p->highest.y);
             
             p->lowest.x = (p->lowest.x > tspsWidth ? tspsWidth : p->lowest.x);
             p->lowest.x = (p->lowest.x < 0 ? 0 : p->lowest.x);
             p->lowest.y = (p->lowest.y > tspsHeight ? tspsHeight : p->lowest.y);
             p->lowest.y = (p->lowest.y < 0 ? 0 : p->lowest.y);
             
             // ROI for opticalflow
             ofRectangle roi = p->getBoundingRectNormalized(tspsWidth, tspsHeight);
             roi.x *= flow.getWidth();
             roi.y *= flow.getHeight();
             roi.width *= flow.getWidth();
             roi.height *= flow.getHeight();                
             
             // sum optical flow for the person
             if ( bTrackOpticalFlow && bFlowTrackedOnce ){
                 // TO-DO!
                 p->opticalFlowVectorAccumulation = flow.getAverageFlowInRegion(roi);
             } else {
                 p->opticalFlowVectorAccumulation.x = p->opticalFlowVectorAccumulation.y = 0;
             }
             
             //detect haar patterns (faces, eyes, etc)
             if ( bTrackHaar ){
                 //find the region of interest, expanded by haarArea.
                 ofRectangle haarROI;
                 haarROI.x		= fmax( (p->boundingRect.x - haarAreaPadding/2) * haarTrackingScale, 0.0f );
                 haarROI.y		= fmax( (p->boundingRect.y - haarAreaPadding/2) * haarTrackingScale, 0.0f );
                 haarROI.width	= fmin( (p->boundingRect.width  + haarAreaPadding*2) * haarTrackingScale, cameraBabyImage.width  - haarROI.x );
                 haarROI.height	= fmin( (p->boundingRect.height + haarAreaPadding*2) * haarTrackingScale, cameraBabyImage.height - haarROI.y );
                 
                 bool haarThisFrame = false;
                 for(int j = 0; j < haarObjects.size(); j++) {
                     ofRectangle hr = toOf(haarObjects[j]);
                     
                     //check to see if the haar is contained within the bounding rectangle
                     if(hr.x > haarROI.x && hr.y > haarROI.y && hr.x+hr.width < haarROI.x+haarROI.width && hr.y+hr.height < haarROI.y+haarROI.height){
                         hr.x /= haarTrackingScale;
                         hr.y /= haarTrackingScale;
                         hr.width /= haarTrackingScale;
                         hr.height /= haarTrackingScale;
                         p->setHaarRect(hr);
                         haarThisFrame = true;
                         break;
                     }
                 }
                 if(!haarThisFrame){
                     p->noHaarThisFrame();
                 }
             }
             personUpdated(p, scene);
         } else {
             ofPoint centroid = toOf(contourFinder.getCentroid(i));
             CvPerson* newPerson = new CvPerson(id, i, contourFinder);
             personEntered(newPerson, scene);
         }
     }
     
     //reset scene
     if ( bTrackOpticalFlow && bFlowTrackedOnce ){
         scene->averageMotion = flow.getAverageFlow();
     } else {
         scene->averageMotion = ofPoint(0,0);
     }
     scene->update( trackedPeople, tspsWidth, tspsHeight );
     
     // delete old blobs
     for (int i=trackedPeople->size()-1; i>=0; i--){
         Person* p = (*trackedPeople)[i];
         EventArgs args;
         args.person = p;
         args.scene  = scene;
         
         if (p == NULL){
             personWillLeave(p, scene);
             trackedPeople->erase(trackedPeople->begin() + i);
         } else if ( !rectTracker.existsCurrent(p->pid) ){ // label no longer tracked this frame: the person has left
             personWillLeave(p, scene);
             trackedPeople->erase(trackedPeople->begin() + i);
         }
     }
     return differencedImage.getPixelsRef();
 }
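The highest/lowest search above relies on cv::minMaxLoc over the person's bounding box. Below is a standalone sketch of just that step, assuming a single-channel depth image; the function name and parameters are illustrative, not part of CvProcessor.

 //------------------------------------------------------------------------
 // Illustrative sketch of the peak-finding step: brightest pixel in an ROI
 // of a single-channel depth image, returned in full-image coordinates.
 ofPoint findHighestPoint( ofImage & depthImage, const ofRectangle & bounds ){
     cv::Mat depthMat = ofxCv::toCv( depthImage );
     cv::Rect rect( bounds.x, bounds.y, bounds.width, bounds.height );
     cv::Mat roi( depthMat, rect );
     
     double minVal = 0, maxVal = 0;
     cv::Point minLoc, maxLoc;
     cv::minMaxLoc( roi, &minVal, &maxVal, &minLoc, &maxLoc );
     
     // value of the brightest pixel stored in .z, as in CvProcessor above
     return ofPoint( bounds.x + maxLoc.x, bounds.y + maxLoc.y, maxVal );
 }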