Example #1
	void update() {
#ifdef INSTALL
		if(cam.update()) {
			ofPixels& pixels = cam.getColorPixels();
#else
		cam.update();
		if(cam.isFrameNew()) {
			ofPixels& pixels = cam.getPixelsRef();
#endif
			// next two could be replaced with one line
			ofxCv::rotate90(pixels, rotated, rotate ? 270 : 0);
		ofxCv::flip(rotated, rotated, 1);
			Mat rotatedMat = toCv(rotated);
			if(tracker.update(rotatedMat))  {
				ofVec2f position = tracker.getPosition();
				vector<FaceTrackerData*> neighbors = data.getNeighborsCount(position, neighborCount);
				FaceTrackerData curData;
				curData.load(tracker);
				if(!neighbors.empty()) {
					nearestData = *faceCompare.nearest(curData, neighbors);
					if(nearestData.label != lastLabel) {
						similar.loadImage(nearestData.getImageFilename());
#ifdef INSTALL
						whitePoint = getWhitePoint(similar);
#else
						whitePoint.set(1, 1, 1);
#endif
					}
					lastLabel = nearestData.label;
				}
				if(faceCompare.different(curData, currentData) && faceCompare.different(curData, neighbors)) {
					saveFace(curData, rotated);
					currentData.push_back(pair<ofVec2f, FaceTrackerData>(position, curData));
				}
			}
			presence.update(tracker.getFound());
			if(presence.wasTriggered()) {
				presenceFade.stop();
			}
			if(presence.wasUntriggered()) {
				for(int i = 0; i < currentData.size(); i++) {
					data.add(currentData[i].first, currentData[i].second);
				}
				currentData.clear();
				presenceFade.start();
			}
		}
	}
	void draw() {
		ofBackground(255);
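		// CGDisplayHideCursor is a macOS-only CoreGraphics call; it hides the hardware cursor every frame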
		CGDisplayHideCursor(NULL);
		ofSetColor(255);
		if(similar.isAllocated()) {
			shader.begin();
			shader.setUniformTexture("tex", similar, 0);
			shader.setUniform3fv("whitePoint", (float*) &whitePoint);
			similar.draw(0, 0);
			shader.end();
		}
		ofPushStyle();
		if(presenceFade.getActive()) {
			ofSetColor(0, ofMap(presenceFade.get(), 0, 1, 0, 128));
			ofFill();
			ofRect(0, 0, ofGetWidth(), ofGetHeight());
			ofSetColor(255, ofMap(presenceFade.get(), 0, 1, 0, 32));
			data.drawBins();
			ofSetColor(255, ofMap(presenceFade.get(), 0, 1, 0, 64));
			data.drawData();
		}
		ofSetColor(255, 64);
		ofNoFill();
		if(!tracker.getFound()) {
			ofCircle(tracker.getPosition(), 10);
		}
		tracker.draw();
		ofPopStyle();
		
#ifndef INSTALL
		drawFramerate();
#endif
	}
Example #2
//--------------------------------------------------------------
void contenidor(Cam cam, ofImage *image){

    float minx = 1024;
    int ixmin = 0;
    int deltak = 10;
    float xco, yco;
    cv::Point punt;
    vector<float> recta_sx(cam.resy), recta_ix(cam.resy), recta_sy(cam.resy), recta_iy(cam.resy);
    double scx = 0, scxx = 0, scy = 0, scxy = 0;
    double varcx, covc;
    double paramcs0, paramcs1, paramci0, paramci1;
    float Xco, Yco, Xc1, Yc1, Zco, Zc1;
    double delta_alfa, delta_alfa1 = 0;
    Mat ima_aux = toCv(*image);

    for(int i=0; i<=cam.resy-1; i++){
        if(cam.p[i].x < minx){
            ixmin = i;
            minx = cam.p[i].x;
        }
    }

    int j = 0;
    for(int i=0; i<=ixmin-deltak; i++){
        if(cam.p[i].x != 1024){
            recta_sx[j] = cam.p[i].x;
            recta_sy[j] = cam.p[i].y;
            j = j + 1;
            scx += cam.p[i].x;
            scy += cam.p[i].y;
            scxy += cam.p[i].x * cam.p[i].y;
            scxx += cam.p[i].x * cam.p[i].x;
        }
    }

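    // Least-squares line fit from the accumulated sums (n = j points):
    //   slope (paramcs0)     = (Σxy − Σx·Σy/n) / (Σx² − (Σx)²/n)
    //   intercept (paramcs1) = mean_y − slope · mean_x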
    double mean_cx = scx / j;
    double mean_cy = scy / j;

    varcx = scxx - scx * mean_cx;
    covc = scxy - scx * mean_cy;

    // TODO: guard against varcx == 0 (and j == 0 above) before dividing
    paramcs0 = covc / varcx;
    paramcs1= mean_cy - paramcs0 * mean_cx;


    scx = scy = scxy = scxx = 0;

    int k = 0;
    for(int i=ixmin+deltak; i<=cam.resy-1; i++){
        if(cam.p[i].x != 1024){
            recta_ix[k] = cam.p[i].x;
            recta_iy[k] = cam.p[i].y;
            k = k + 1;
            scx += cam.p[i].x;
            scy += cam.p[i].y;
            scxy += cam.p[i].x * cam.p[i].y;
            scxx += cam.p[i].x * cam.p[i].x;
        }
    }

    mean_cx = scx / k;
    mean_cy = scy / k;

    varcx = scxx - scx * mean_cx;
    covc = scxy - scx * mean_cy;

    // TODO: guard against varcx == 0 (and k == 0 above) before dividing
    paramci0 = covc / varcx;
    paramci1= mean_cy - paramci0 * mean_cx;

    xco = (-paramcs1+paramci1) / (paramcs0-paramci0);
    yco = paramcs0 * xco + paramcs1;

    punt.x = xco;
    punt.y = yco;

    circle(ima_aux, punt, 10, Scalar( 20,255,255 ),2, 1, 0);

    ofImage im;

    toOf(ima_aux,im);
    im.update();

    *image = im;

    if( (j!=0)&&(k!=0) ){
        cam_dis(cam,1, xco,yco,&Xco,&Yco);
        cam_dis(cam,1, xco+50,paramci0*(xco+50)+paramci1,&Xc1,&Yc1);

        delta_alfa=(PI/180.0f) * cam.alfa * (1-((float)yco/(float)(cam.resy/2.0f)));

        if(delta_alfa != 0){
            delta_alfa1 = sqrt(Xco*Xco+Yco*Yco)/cos(delta_alfa);
        }

        Zco = delta_alfa1 * sin(delta_alfa);

        delta_alfa = (PI/180.0f) * cam.alfa * (1-((float)(paramci0*(xco+50)+paramci1)/(float)(cam.resy/2.0f)));

        if (delta_alfa != 0){
            delta_alfa1 = sqrt(Xc1*Xc1+Yc1*Yc1)/cos(delta_alfa);
        }

        Zc1 = delta_alfa1 * sin(delta_alfa);

        float anglec;

        if(Zc1-Zco != 0){
            anglec = -1 * atan((Yc1-Yco)/(Zc1-Zco));
        }
        else{
            anglec = 0.0f;
        }

        ofLogError() << "3Dscan::contenidor: Posició Vertex: " << Yco << "," << Zco << endl;
        ofLogError() << "3Dscan::contenidor: Posició centre Contenidor: " << Yco + 25 * sin(anglec) + 25 * cos(anglec) << "," << Zco - 25 * cos(anglec) + 25 * sin(anglec) << endl;
        ofLogError() << "3Dscan::contenidor: Angle camió: " << (180.0f/PI) * anglec << endl;

    } // end if( (j!=0)&&(k!=0) )
}
Example #3
//--------------------------------------------------------------
void scan(Cam *cam, ofImage *grislaser, ofImage *TaL, ofImage *TsL){

    Mat image1;
    Mat Laser1;
    Mat Tot, gris, grisc;

    Mat HSV;
    Mat threshold1;

//    camera(cam);

    Mat tt1, tt2;

    tt1 = toCv(*TaL).clone();
    Laser1 = tt1.clone();

    tt2 = toCv(*TsL).clone();
    Tot = tt2.clone();

    Mat th1;
    Mat image2;

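    // Isolate the laser line: absdiff the two captures (TaL/TsL, apparently the
    // frames with and without the laser) and segment the difference in HSV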
    absdiff(Laser1, Tot, image1);
    cvtColor(image1, HSV, CV_BGR2HSV);
    inRange(HSV, Scalar(cam->Bi, cam->Gi, cam->Ri), Scalar(cam->Bs, cam->Gs, cam->Rs), threshold1);
    th1 = threshold1.clone();
    image2 = image1.clone();
    GaussianBlur(threshold1, th1, cv::Size(1,1), 0,0);
    GaussianBlur(image2, image1, cv::Size(cam->blur_ksizew, cam->blur_ksizeh), cam->blur_sigmax, cam->blur_sigmay);
    cam_cap_subpixel(cam, image1, threshold1);

    cvtColor(image1, gris, CV_BGR2GRAY);
    cvtColor(gris, grisc, CV_GRAY2BGR);

    for(int i=0; i<cam->resy; i++){
        cv::Point paux1;
        paux1.x = (int)cam->p[i].x;
        paux1.y = (int)cam->p[i].y;

        line(grisc, paux1, paux1, Scalar(255,0,0), 1,8,0);
    }

    ofImage gl,L1,Tt;

    toOf(grisc, gl);
    gl.update();

    *grislaser = gl;

    toOf(Laser1, L1);
    L1.update();

    *TaL = L1;

    toOf(Tot, Tt);
    Tt.update();

    *TsL = Tt;
}
Example #4
	cv::RotatedRect fitEllipse(const ofPolyline& polyline) {
		return fitEllipse(Mat(toCv(polyline)));
	}
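A minimal usage sketch (hypothetical: `contour` stands for any ofPolyline with at least five vertices, which cv::fitEllipse requires):

	cv::RotatedRect ellipse = ofxCv::fitEllipse(contour);
	ofPushMatrix();
	ofTranslate(ellipse.center.x, ellipse.center.y);
	ofRotate(ellipse.angle); // cv::RotatedRect angles are in degrees
	ofNoFill();
	ofEllipse(0, 0, ellipse.size.width, ellipse.size.height);
	ofPopMatrix();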
Example #5
	void fitLine(const ofPolyline& polyline, ofVec2f& point, ofVec2f& direction) {
		Vec4f line;
		fitLine(Mat(toCv(polyline)), line, CV_DIST_L2, 0, .01, .01);
		direction.set(line[0], line[1]);
		point.set(line[2], line[3]);
	}
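A minimal usage sketch (again with a hypothetical `contour`); cv::fitLine returns a unit direction plus a point on the line, so drawing just extends the direction from that point:

	ofVec2f point, direction;
	ofxCv::fitLine(contour, point, direction);
	ofLine(point - direction * 1000, point + direction * 1000);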
Example #6
	vector<cv::Vec4i> convexityDefects(const ofPolyline& polyline) {
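		// cv::convexityDefects only accepts integer contours, so the float
		// points from toCv() are copied into Point2i before the call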
		vector<cv::Point2f> contour2f = toCv(polyline);
		vector<cv::Point2i> contour2i;
		Mat(contour2f).copyTo(contour2i);
		return convexityDefects(contour2i);
	}
Example #7
	cv::RotatedRect minAreaRect(const ofPolyline& polyline) {
		return minAreaRect(Mat(toCv(polyline)));
	}
Example #8
	ofPolyline convexHull(const ofPolyline& polyline) {
		vector<cv::Point2f> contour = toCv(polyline);
		vector<cv::Point2f> hull;
		convexHull(Mat(contour), hull);
		return toOf(hull);
	}
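A minimal usage sketch (hypothetical `contour` once more), overlaying the hull on the contour it encloses:

	ofPolyline hull = ofxCv::convexHull(contour);
	contour.draw();
	hull.draw();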
Example #9
void FaceScanner::update()
{
    m_grabber.update();
    
    if(m_grabber.isFrameNew())
    {
        if (m_shouldTrack)
        {
            m_tracker.update(toCv(m_grabber));
            
            //----If we've found a face, we store its intrinsics and begin our scanning procedure
            if (m_tracker.getFound() &&                                                                 //Have we found a face?
                ofGetElapsedTimef() > 2.0f &&                                                           //Has it been at least 2 seconds?
                m_tracker.getImageFeature(ofxFaceTracker::FACE_OUTLINE).getArea() > MIN_FACE_AREA)      //If we've found a face, is it reasonably large?
            {
                ofLogNotice("Face Scanner") << "Found a face.";
                
                //----The FBOs are cleared IFF the last sequence drawn was the particle system (denoted by the boolean m_shouldClearAmbient)
                if (!m_shouldClearAmbient) m_inkRenderer->clear();
                
                m_inkRenderer->setDrawMode(InkRenderer::FOLLOWERS);
                convertColor(m_grabber, m_thresh, CV_RGB2GRAY);
                
                m_faceOutline =         m_tracker.getImageFeature(ofxFaceTracker::FACE_OUTLINE);
                m_faceCenter =          m_faceOutline.getCentroid2D();
                m_faceArea =            m_faceOutline.getArea();       //This is a hack: something is wrong with the sign of the value returned by getArea()
                m_faceBoundingBox =     m_faceOutline.getBoundingBox();
                
                m_shouldTrack =         false;
                m_drawFrameStart =      ofGetElapsedTimef();                //When did this scan start?
                
                scan(200, 10);                                               //Start at a threshold value of 200, and decrement by 10 for each iteration
            }
            else
            {
                //----If we don't see a face and it's been m_ambientTimeout seconds, enter ambient mode
                if ((ofGetElapsedTimef() - m_ambientFrameStart) >= m_ambientTimeout) {
                    
                    //----We only want to do these operations once, otherwise the FBOs will clear every frame, and the particle system will never be drawn
                    if (m_shouldClearAmbient)
                    {
                        ofLogNotice("Face Scanner") << "Entering ambient mode.";
                        
                        //----We tell the InkRenderer to draw particles after clearing the FBOs and resetting the "draw counter"
                        m_inkRenderer->setDrawMode(InkRenderer::PARTICLES);
                        m_inkRenderer->clear();
                        m_shouldClearAmbient = false;
                    }
                }
            }
        }
        else if ((ofGetElapsedTimef() - m_drawFrameStart) >= m_drawTimeout)
        {
            //----If we shouldn't be tracking, that means we've already found a face, so begin the countdown
            ofLogNotice("Face Scanner") << "Starting a new scan.";
            
            //----After this point, we might not see another face, so we record the current time and ready the InkRenderer for a particle simulation
            m_ambientFrameStart =   ofGetElapsedTimef();
            m_shouldClearAmbient =  true;
            ofSaveScreen("screenshots/image_" + ofGetTimestampString() + ".png");
            
            reset();
        }
    }
}
Example #10
 //------------------------------------------------------------------------
 ofPixelsRef CvProcessor::process ( ofBaseImage & image ){
     if ( bTrackHaar ){
         processHaar( cameraBabyImage );
     }
     
     if ( bTrackOpticalFlow ){
         processOpticalFlow( cameraSmallImage );
     }
     
     differencedImage.setFromPixels(image.getPixelsRef());
     ofxCv::threshold(differencedImage, threshold);
     
     // find contours
     contourFinder.setFindHoles( bFindHoles );
     contourFinder.setMinArea( minBlobArea * tspsWidth * tspsHeight );
     contourFinder.setMaxArea( maxBlobArea * tspsWidth * tspsHeight );
     contourFinder.findContours( differencedImage );
     
     // update people
     RectTracker& rectTracker    = contourFinder.getTracker();
     cv::Mat cameraMat           = toCv(cameraImage);
     
     //optical flow scale
     // float flowROIScale = tspsWidth/flow.getWidth();
     
     for(int i = 0; i < contourFinder.size(); i++){
         unsigned int id = contourFinder.getLabel(i);
         if(rectTracker.existsPrevious(id)) {
             CvPerson* p = (CvPerson *) getTrackedPerson(id);
             //somehow we are not tracking this person, safeguard (shouldn't happen)
             if(NULL == p){
                 ofLog(OF_LOG_WARNING, "Person::warning: encountered a persistent blob without a person behind it\n");
                 continue;
             }
             p->oid = i; //hack ;(
             
             //update this person with new blob info
             // to-do: make centroid dampening dynamic
             p->update(true);
             
             
             //normalize simple contour (use j to avoid shadowing the outer loop's i)
             for (int j=0; j<p->simpleContour.size(); j++){
                 p->simpleContour[j].x /= tspsWidth;
                 p->simpleContour[j].y /= tspsHeight;
             }
             
             //find peak in blob (only useful with depth cameras)
             cv::Point minLoc, maxLoc;
             double minVal = 0, maxVal = 0;
             cv::Rect rect;
             rect.x      = p->boundingRect.x;
             rect.y      = p->boundingRect.y;
             rect.width  = p->boundingRect.width;
             rect.height = p->boundingRect.height;
             cv::Mat roiMat(cameraMat, rect);
             cv::minMaxLoc( roiMat, &minVal, &maxVal, &minLoc, &maxLoc, cv::Mat());
             
             // set depth
             p->depth = p->highest.z / 255.0f;
             
             // set highest and lowest points: x, y, VALUE stored in .z prop
             // ease vals unless first time you're setting them
             if ( p->highest.x == -1 ){
                 p->highest.set(  p->boundingRect.x + maxLoc.x,  p->boundingRect.y + maxLoc.y, maxVal);
                 p->lowest.set(  p->boundingRect.x + minLoc.x,  p->boundingRect.y + minLoc.y, minVal);
             } else {
                 p->highest.x = ( p->highest.x * .9 ) + ( p->boundingRect.x + maxLoc.x ) * .1;
                 p->highest.y = ( p->highest.y * .9 ) + ( p->boundingRect.y + maxLoc.y ) * .1;
                 p->highest.z = ( p->highest.z * .9) + ( maxVal ) * .1;
                 p->lowest.x = ( p->lowest.x * .9 ) + ( p->boundingRect.x + minLoc.x ) * .1;
                 p->lowest.y = ( p->lowest.y * .9 ) + ( p->boundingRect.y + minLoc.y ) * .1;
                 p->lowest.z = ( p->lowest.z * .9) + ( minVal ) * .1;
             }
             
             // cap highest + lowest
             p->highest.x = (p->highest.x > tspsWidth ? tspsWidth : p->highest.x);
             p->highest.x = (p->highest.x < 0 ? 0 : p->highest.x);
             p->highest.y = (p->highest.y > tspsHeight ? tspsHeight : p->highest.y);
             p->highest.y = (p->highest.y < 0 ? 0 : p->highest.y);
             
             p->lowest.x = (p->lowest.x > tspsWidth ? tspsWidth : p->lowest.x);
             p->lowest.x = (p->lowest.x < 0 ? 0 : p->lowest.x);
             p->lowest.y = (p->lowest.y > tspsHeight ? tspsHeight : p->lowest.y);
             p->lowest.y = (p->lowest.y < 0 ? 0 : p->lowest.y);
             
             // ROI for opticalflow
             ofRectangle roi = p->getBoundingRectNormalized(tspsWidth, tspsHeight);
             roi.x *= flow.getWidth();
             roi.y *= flow.getHeight();
             roi.width *= flow.getWidth();
             roi.height *= flow.getHeight();                
             
             // sum optical flow for the person
             if ( bTrackOpticalFlow && bFlowTrackedOnce ){
                 // TO-DO!
                 p->opticalFlowVectorAccumulation = flow.getAverageFlowInRegion(roi);
             } else {
                 p->opticalFlowVectorAccumulation.x = p->opticalFlowVectorAccumulation.y = 0;
             }
             
             //detect haar patterns (faces, eyes, etc)
             if ( bTrackHaar ){
                 //find the region of interest, expanded by haarAreaPadding and clamped to the baby image bounds
                 ofRectangle haarROI;
                 haarROI.x      = MAX(0.0f, (p->boundingRect.x - haarAreaPadding/2) * haarTrackingScale);
                 haarROI.y      = MAX(0.0f, (p->boundingRect.y - haarAreaPadding/2) * haarTrackingScale);
                 haarROI.width  = MIN((float) cameraBabyImage.width,  (p->boundingRect.width  + haarAreaPadding*2) * haarTrackingScale);
                 haarROI.height = MIN((float) cameraBabyImage.height, (p->boundingRect.height + haarAreaPadding*2) * haarTrackingScale);
                 
                 bool haarThisFrame = false;
                 for(int j = 0; j < haarObjects.size(); j++) {
                     ofRectangle hr = toOf(haarObjects[j]);
                     
                     //check to see if the haar is contained within the bounding rectangle
                     if(hr.x > haarROI.x && hr.y > haarROI.y && hr.x+hr.width < haarROI.x+haarROI.width && hr.y+hr.height < haarROI.y+haarROI.height){
                         hr.x /= haarTrackingScale;
                         hr.y /= haarTrackingScale;
                         hr.width /= haarTrackingScale;
                         hr.height /= haarTrackingScale;
                         p->setHaarRect(hr);
                         haarThisFrame = true;
                         break;
                     }
                 }
                 if(!haarThisFrame){
                     p->noHaarThisFrame();
                 }
             }
             personUpdated(p, scene);
         } else {
             ofPoint centroid = toOf(contourFinder.getCentroid(i));
             CvPerson* newPerson = new CvPerson(id, i, contourFinder);
             personEntered(newPerson, scene);
         }
     }
     
     //reset scene
     if ( bTrackOpticalFlow && bFlowTrackedOnce ){
         scene->averageMotion = flow.getAverageFlow();
     } else {
         scene->averageMotion = ofPoint(0,0);
     }
     scene->update( trackedPeople, tspsWidth, tspsHeight );
     
     // delete old blobs
     for (int i=trackedPeople->size()-1; i>=0; i--){
         Person* p = (*trackedPeople)[i];
         EventArgs args;
         args.person = p;
         args.scene  = scene;
         
         if (p == NULL){
             personWillLeave(p, scene);
             trackedPeople->erase(trackedPeople->begin() + i);
         } else if ( !rectTracker.existsCurrent(p->pid) ){
             personWillLeave(p, scene);
             trackedPeople->erase(trackedPeople->begin() + i);
         }
     }
     return differencedImage.getPixelsRef();
 }
Example #11
void KinectController::updateAnalisys(ofxKinect & kinect, float bbX, float bbY, float bbZ, float bbW, float bbH, float bbD){

	if(kinect.isFrameNew()){
		static int counter = 0;
		int w = 640;
		int h = 480;

#if ANGLE_FROM_ACCEL
		//ofVec3f worldOffset(worldX,worldY,worldZ);
		ofMatrix4x4 m = camTransform.getModelViewMatrix();
		int i = 0;
		for(int y = 0; y < h; y +=step) {
			for(int x = 0; x < w; x +=step) {
				const short & distance = kinect.getRawDepthPixelsRef()[x+y*w];
				//if(distance > 0) {
					const ofVec3f  & v = kinect.getWorldCoordinateAt(x, y, distance);
					/*if(correctAngle){
						v = v*m;
					}
					if(!applyBB || insideBB3D(v,ofVec3f(bbX,bbY,bbZ), ofVec3f(bbW, bbH, bbD))){*/
						mesh.getVertices()[i] = v;
					/*}else{
						mesh.getVertices()[i].set(0,0,0);
					}*/
				//}
				i++;
			}
		}
#elif ANGLE_FROM_PCL_GROUND_PLANE
		pcl::PointCloud<pcl::PointXYZ>::Ptr pcPtr = pcl::PointCloud<pcl::PointXYZ>::Ptr(new pcl::PointCloud<pcl::PointXYZ>);
		for(int y = 0; y < h; y += step) {
			for(int x = 0; x < w; x += step) {
				//if(kinect.getDistanceAt(x, y) > 0) {
					float z = kinect.getDistanceAt(x, y);
					pcPtr->push_back(pcl::PointXYZ(x,y,z));
				//}
			}
		}
		pcPtr->width = (w + step - 1)/step;
		pcPtr->height = (h + step - 1)/step;

		pcl::ModelCoefficients::Ptr planeCoeffs = fitPlane<pcl::PointXYZ>(pcPtr,10,5);
		plane.set(planeCoeffs->values[0],planeCoeffs->values[1],planeCoeffs->values[2],planeCoeffs->values[3]);

		for(int i=0;i<pcPtr->points.size();i++){
			ofVec3f v(pcPtr->points[i].x,pcPtr->points[i].y,pcPtr->points[i].z);
			if(plane.distanceToPoint(v)>60)
				mesh.addVertex( kinect.getWorldCoordinateAt(v.x, v.y, v.z) );
		}

		//pcPtr = findAndSubtractPlane<pcl::PointXYZ>(pcPtr,60,5);
		//ofxPCL::toOf(pcPtr,mesh,1,1,1);
#elif CV_ANALISYS
		cv::Mat backgroundMat = toCv(background);
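		// Background model: accumulate depth frames while captureBg counts down
		// (the /= 10 assumes ten frames were summed), blur the average once,
		// then switch to frame differencing against it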
		if(captureBg>0){
			backgroundMat += toCv(kinect.getDistancePixelsRef());
			captureBg--;
		}else if(captureBg==0){
			backgroundMat /= 10;
			cv::GaussianBlur(backgroundMat,backgroundMat,Size(11,11),10);
			captureBg=-1;
		}else{
			// difference threshold
			cv::Mat diffMat = toCv(diff);
			cv::Mat currentMat = toCv(kinect.getDistancePixelsRef());
			cv::Mat gaussCurrentMat = toCv(gaussCurrent);
			cv::GaussianBlur(currentMat,gaussCurrentMat,Size(11,11),10);
			cv::absdiff(backgroundMat, gaussCurrentMat, diffMat);
			//diffMat = toCv(background) - toCv(kinect.getDistancePixelsRef());
			threshold(diff,thresPix,.001);
			thres8Bit = thresPix;

			cv::Mat kernel;
			cv::Mat thresMat = toCv(thres8Bit);
			cv::Point anchor(-1,-1);
			erode(toCv(thres8Bit),thresMat,kernel,anchor,10);

			dilate(toCv(thres8Bit),thresMat,kernel,anchor,5);


			cv::Mat fgMat = toCv(fg);
			bgSubstractor(toCv(thres8Bit),fgMat);
			contourFinder.findContours(fg);

			for(int i=0;i<oscContours.size();i++){
				oscContours[i]->newFrame(frame);
			}


			polylines = contourFinder.getPolylines();
			for(int i=0;i<(int)polylines.size();i++){
				ofPoint centroid = polylines[i].getCentroid2D();
				polylines[i] = polylines[i].getResampledByCount(16);
				float z = kinect.getDistanceAt(centroid);
				for(int j=0;j<oscContours.size();j++){
					oscContours[j]->sendBlob(polylines[i],z);
				}
			}
			frame++;

			if(recording){

				//convertColor(kinect.getDepthPixelsRef(),rgbDepth,CV_GRAY2RGB);
				recorder.addFrame(kinect.getRawDepthPixelsRef());
			}
		}
#endif
	}
}
Example #12
//--------------------------------------------------------------
void ofApp::update(){
    
    kinect.update();
    
    deltaTime = ofGetElapsedTimef() - lastTime;
    lastTime = ofGetElapsedTimef();
    
    if (kinect.isFrameNew()) {

        // Load grayscale depth image from the kinect source
        grayImage.setFromPixels(kinect.getDepthPixels(), kinect.width, kinect.height, OF_IMAGE_GRAYSCALE);
        
        // Threshold image
        threshold(grayImage, grayThreshNear, nearThreshold, true);
        threshold(grayImage, grayThreshFar, farThreshold);
        
        // Convert to CV to perform AND operation
        Mat grayThreshNearMat = toCv(grayThreshNear);
        Mat grayThreshFarMat = toCv(grayThreshFar);
        Mat grayImageMat = toCv(grayImage);
        
        // cvAnd to get the pixels which lie in both thresholds (the intersection of near and far ranges)
        bitwise_and(grayThreshNearMat, grayThreshFarMat, grayImageMat);
        
        // Save pre-processed image for drawing it
        grayPreprocImage = grayImage;
        
        // Process image
        dilate(grayImage);
        dilate(grayImage);
        //erode(grayImage);
        
        // Mark image as changed
        grayImage.update();
        
        // Find contours
        //contourFinder.setThreshold(ofMap(mouseX, 0, ofGetWidth(), 0, 255));
        contourFinder.findContours(grayImage);
        
        ofPushStyle();
        ofEnableBlendMode(OF_BLENDMODE_DISABLED);
        cameraFbo.begin();
        ofSetColor(ofColor::white);
        if (doFlipCamera)
            kinect.drawDepth(cameraFbo.getWidth(), 0, -cameraFbo.getWidth(), cameraFbo.getHeight());  // Flip Horizontal
        else
            kinect.drawDepth(0, 0, cameraFbo.getWidth(), cameraFbo.getHeight());
        cameraFbo.end();
        ofDisableBlendMode();
        ofPopStyle();
        
        opticalFlow.setSource(cameraFbo.getTexture());
        opticalFlow.update(deltaTime);
        
        velocityMask.setDensity(cameraFbo.getTexture());
        velocityMask.setVelocity(opticalFlow.getOpticalFlow());
        velocityMask.update();
    }
    
    
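    // Drive the fluid: optical flow contributes velocity; the velocity mask
    // contributes density (color) and temperature (luminance)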
    fluidSimulation.addVelocity(opticalFlow.getOpticalFlowDecay());
    fluidSimulation.addDensity(velocityMask.getColorMask());
    fluidSimulation.addTemperature(velocityMask.getLuminanceMask());
    
    mouseForces.update(deltaTime);
    
    for (int i=0; i<mouseForces.getNumForces(); i++) {
        if (mouseForces.didChange(i)) {
            switch (mouseForces.getType(i)) {
                case FT_DENSITY:
                    fluidSimulation.addDensity(mouseForces.getTextureReference(i), mouseForces.getStrength(i));
                    break;
                case FT_VELOCITY:
                    fluidSimulation.addVelocity(mouseForces.getTextureReference(i), mouseForces.getStrength(i));
                    particleFlow.addFlowVelocity(mouseForces.getTextureReference(i), mouseForces.getStrength(i));
                    break;
                case FT_TEMPERATURE:
                    fluidSimulation.addTemperature(mouseForces.getTextureReference(i), mouseForces.getStrength(i));
                    break;
                case FT_PRESSURE:
                    fluidSimulation.addPressure(mouseForces.getTextureReference(i), mouseForces.getStrength(i));
                    break;
                case FT_OBSTACLE:
                    fluidSimulation.addTempObstacle(mouseForces.getTextureReference(i));
                    break;
                default:
                    break;
            }
        }
    }
    
    fluidSimulation.update();
    
    if (particleFlow.isActive()) {
        particleFlow.setSpeed(fluidSimulation.getSpeed());
        particleFlow.setCellSize(fluidSimulation.getCellSize());
        particleFlow.addFlowVelocity(opticalFlow.getOpticalFlow());
        particleFlow.addFluidVelocity(fluidSimulation.getVelocity());
        //		particleFlow.addDensity(fluidSimulation.getDensity());
        particleFlow.setObstacle(fluidSimulation.getObstacle());
    }
    particleFlow.update();
    
}