예제 #1
0
	void Calibration::draw(int i) const {
		// Render the detected image points of board i as red, outlined circles.
		ofPushStyle();
		ofNoFill();
		ofSetColor(ofColor::red);
		const auto & boardPoints = imagePoints[i];
		for(std::size_t p = 0; p < boardPoints.size(); ++p) {
			ofCircle(toOf(boardPoints[p]), 5);
		}
		ofPopStyle();
	}
예제 #2
0
std::vector<ofPolyline> Clipper::simplifyPolylines(const std::vector<ofPolyline>& polylines,
                                                   ofPolyWindingMode windingMode,
                                                   ClipperLib::cInt scale)
{
    ClipperLib::Paths out;
    ClipperLib::SimplifyPolygons(toClipper(polylines, scale),
                                 out,
                                 toClipper(windingMode));
    return toOf(out, true, scale);
}
예제 #3
0
void testApp::updateTrianglesRandom() {
	// Rebuild the triangulated depth mesh: sample random points weighted by
	// depth-gradient strength, pin the image border, then triangulate.
	// Wrap the raw kinect depth buffer in a cv::Mat header (no pixel copy).
	Mat mat = Mat(kinect.getHeight(), kinect.getWidth(), CV_32FC1, kinect.getDistancePixels());
	
	// Combined x/y Sobel derivative highlights depth discontinuities.
	Sobel(mat, sobelxy, CV_32F, 1, 1);
	
	sobelxy = abs(sobelxy);
	int randomBlur = panel.getValueI("randomBlur") * 2 + 1; // force an odd kernel size
	// ddepth 0 => CV_8U output, which is why sobelbox is read as unsigned char below.
	boxFilter(sobelxy, sobelbox, 0, cv::Size(randomBlur, randomBlur), Point2d(-1, -1), false);
	
	triangulator.reset();
	points.clear();
	int i = 0;
	attempts = 0;
	int randomCount = panel.getValueI("randomCount");
	float randomWeight = panel.getValueF("randomWeight");
	// Rejection sampling: positions with a strong (blurred) gradient are more
	// likely to pass the random gauntlet and become triangulation points.
	while(i < randomCount) {
		Point2d curPosition(1 + (int) ofRandom(sobelbox.cols - 3), 
												1 + (int) ofRandom(sobelbox.rows - 3));
		float curSample = sobelbox.at<unsigned char>(curPosition) / 255.f;
		float curGauntlet = powf(ofRandom(0, 1), 2 * randomWeight);
		if(curSample > curGauntlet) {
			points.push_back(toOf(curPosition));
			triangulator.addPoint(curPosition.x, curPosition.y, 0);
			sobelbox.at<unsigned char>(curPosition) = 0; // don't do the same point twice
			i++;
		}
		attempts++;
		// Bail out when acceptance is pathologically low. Bug fix: the original
		// condition (i > attempts * 100) could never be true because every
		// accepted point also counts as an attempt, so i <= attempts always.
		if(attempts > (i + 1) * 100) {
			break;
		}
	}
	
	// add the edges so the triangulation covers the full image rectangle
	int w = mat.cols;
	int h = mat.rows;
	for(int x = 0; x < w; x++) {
		triangulator.addPoint(x, 0, 0);
		triangulator.addPoint(x, h - 1, 0);
	}
	for(int y = 0; y < h; y++) {
		triangulator.addPoint(0, y, 0);
		triangulator.addPoint(w - 1, y, 0);
	}
	
	triangulator.triangulate();
	
	// Copy the triangle indices out, reversing the vertex order
	// (vert3/vert2/vert1 receive points[0]/[1]/[2] respectively).
	int n = triangulator.triangles.size();
	triangles.resize(n);
	for(int i = 0; i < n; i++) {
		triangles[i].vert3 = triangulator.triangles[i].points[0];
		triangles[i].vert2 = triangulator.triangles[i].points[1];
		triangles[i].vert1 = triangulator.triangles[i].points[2];
	}
}
예제 #4
0
//--------------------------------------------------------------
void ofApp::autoSetKinectWarp(){
    
    // uses openCV to find bounding box of sandbox, and then the inset corners
    // these becomes warp coordinates for Kinect<->projection quad-warp registration
    
    contourFinder.setMinAreaRadius(150);
    contourFinder.setMaxAreaRadius(640);
    contourFinder.setThreshold(140);
    contourFinder.setFindHoles(false);
    contourFinder.setSortBySize(true);
    contourFinder.findContours(kinectRaw);
    
    vector<cv::Point> quad = contourFinder.getFitQuad(contourFinder.size()-1);
    
    // save to warp coords
    
    ofPolyline poly;
    for (int p=0; p<4; p++){
        poly.addVertex(toOf(quad[p]));
    }
    
    // sort clockwise
    kinectCorners[TOP_LEFT] = poly.getClosestPoint(ofVec2f(0,0)) + ofVec2f(10,10);
    kinectCorners[TOP_RIGHT] = poly.getClosestPoint(ofVec2f(640,0)) + ofVec2f(-10,10);
    kinectCorners[BOTTOM_RIGHT] = poly.getClosestPoint(ofVec2f(640,480)) + ofVec2f(-10,-10);
    kinectCorners[BOTTOM_LEFT] = poly.getClosestPoint(ofVec2f(0,480)) + ofVec2f(10,-10);
    
    for (int p=0; p<4; p++) {
        
        // convert kinect image crop coords to warp coords
        ofVec2f corner = kinectCorners[p];

        switch (p){
            case 0:
                corner.set(0-corner.x,0-corner.y);
                break;
            case 1:
                corner.set(1280-corner.x,0-corner.y);
                break;
            case 2:
                corner.set(1280-corner.x,960-corner.y);
                break;
            case 3:
                corner.set(0-corner.x,960-corner.y);
                break;
        }
        warper.setCorner(ofxGLWarper::CornerLocation(p),corner);
    }
    
    bWarped = true;
    
}
예제 #5
0
ofMesh toOf(const glmMesh &_mesh){
    // Convert a glmMesh into an openFrameworks ofMesh: copy every attribute
    // channel, then translate the draw-mode enum.
    ofMesh result;
    
    for (auto &color : _mesh.getColors()) {
        result.addColor(toOf(color));
    }
    for (auto &vertex : _mesh.getVertices()) {
        result.addVertex(toOf(vertex));
    }
    for (auto &normal : _mesh.getNormals()) {
        result.addNormal(toOf(normal));
    }
    for (auto &uv : _mesh.getTexCoords()) {
        result.addTexCoord(toOf(uv));
    }
    for (auto &index : _mesh.getIndices()) {
        result.addIndex(toOf(index));
    }
    
    switch (_mesh.getDrawMode()) {
        case POINTS:         result.setMode(OF_PRIMITIVE_POINTS);         break;
        case LINES:          result.setMode(OF_PRIMITIVE_LINES);          break;
        case LINE_STRIP:     result.setMode(OF_PRIMITIVE_LINE_STRIP);     break;
        case TRIANGLES:      result.setMode(OF_PRIMITIVE_TRIANGLES);      break;
        case TRIANGLE_STRIP: result.setMode(OF_PRIMITIVE_TRIANGLE_STRIP); break;
        default: break; // unknown draw modes leave the ofMesh default untouched
    }
    
    return result;
}
void ofApp::draw() {
    // GUI window: kinect image (top-left), thresholded gray image (top-right),
    // contour overlay (bottom-left), then the gui panel on top.
    ofBackground(0);
    ofSetColor(255);
    ofPushMatrix();
    kinect.drawImage(0, 0);
    ofTranslate(640, 0);
    grayImage.draw(0, 0);
    ofTranslate(-640, 480);
    contourFinder.draw();
    // NOTE(review): this translation is immediately discarded by ofPopMatrix
    // below — appears to have no effect.
    ofTranslate(640, 0);
    ofPopMatrix();
    
    gui.draw();
    
    // MAIN WINDOW: re-draw every tracked contour into the projector output,
    // mapped through the kinect->projector calibration.
    projector.begin();
    
    ofBackground(0);
    
    RectTracker& tracker = contourFinder.getTracker();
    
    for(int i = 0; i < contourFinder.size(); i++) {
        // get contour, label, center point, and age of contour
        vector<cv::Point> points = contourFinder.getContour(i);
        int label = contourFinder.getLabel(i);
        ofPoint center = toOf(contourFinder.getCenter(i)); // currently unused below
        int age = tracker.getAge(label);                   // currently unused below
        
        // map contour using calibration and draw to main window
        ofBeginShape();
        ofFill();
        ofSetColor(blobColors[label % 12]); // stable color per tracked label
        for (int j=0; j<points.size(); j++) {
            // z for each contour pixel is looked up in the raw depth buffer
            ofPoint depthPoint = ofPoint((int)points[j].x, (int)points[j].y, depthPixels[(int)points[j].x + (int)points[j].y * kinect.getWidth()]);
            ofVec3f worldPoint = kinect.projectiveToWorld(depthPoint);
            // projectedPoint is scaled by projector size — presumably
            // normalized [0..1]; confirm against kpt's calibration output.
            ofVec2f projectedPoint = kpt.getProjectedPoint(worldPoint);
            ofVertex(projector.getWidth() * projectedPoint.x, projector.getHeight() * projectedPoint.y);
        }
        ofEndShape();
    }
    
    projector.end();
}
//--------------------------------------------------------------
void ofApp::draw() {
    ofSetWindowTitle(ofToString(ofGetFrameRate()));

    // draw the gui: masked fbo on the left, thresholded depth image plus
    // contour overlay offset 512px to the right
    ofSetColor(255);
    fboMask.draw(0, 0);
    ofTranslate(512, 0);
    grayImage.draw(0, 0);
    contourFinder.draw();
    ofTranslate(-512, 0);

    // draw projected contours into projector window
    projector.begin();

    ofBackground(0);

    RectTracker& tracker = contourFinder.getTracker();

    for(int i = 0; i < contourFinder.size(); i++) {
        // get contour, label, center point, and age of contour
        vector<cv::Point> points = contourFinder.getContour(i);
        int label = contourFinder.getLabel(i);
        ofPoint center = toOf(contourFinder.getCenter(i)); // currently unused below
        int age = tracker.getAge(label);                   // currently unused below

        // map contour using calibration and draw to main window
        ofBeginShape();
        ofFill();
        ofSetColor(blobColors[label % 12]); // stable color per tracked label
        for (int j=0; j<points.size(); j++) {
            ofVec3f worldPoint = kinect.getWorldCoordinateAt(points[j].x, points[j].y);
            if (worldPoint.z == 0) continue; // skip pixels with no depth data
            // projectedPoint is scaled by projector size — presumably
            // normalized [0..1]; confirm against kpt's calibration output.
            ofVec2f projectedPoint = kpt.getProjectedPoint(worldPoint);
            ofVertex(projector.getWidth() * projectedPoint.x, projector.getHeight() * projectedPoint.y);
        }
        ofEndShape();
    }

    projector.end();

    gui.draw();
}
예제 #8
0
// Extract the rotation animation of an FBX node: store the rest orientation
// on `node`, then sample the node's local transform at each key time and
// push the decomposed rotation into node.rotationKeys.
void ofxFBXScene::parseRotationCurve(ofxFBXNode & node, FbxAnimLayer * pAnimLayer, FbxNode* fbxNode, FbxPropertyT<FbxDouble3> &rotation){
	// Rest orientation built from the property's XYZ euler angles (degrees).
	node.originalRotation = ofQuaternion(rotation.Get().mData[0], ofVec3f(1, 0, 0), rotation.Get().mData[1], ofVec3f(0, 1, 0), rotation.Get().mData[2], ofVec3f(0, 0, 1));
	node.getNode().setOrientation(node.originalRotation);
	ofLogVerbose("ofxFBXScene") << "original rotation " << endl << node.originalRotation << endl;

	// No animation curve on this property -> nothing more to parse.
	if(!rotation.GetCurve(pAnimLayer)) return;
	FbxAnimCurve* lAnimCurveX = rotation.GetCurve(pAnimLayer,"X");
	FbxAnimCurve* lAnimCurveY = rotation.GetCurve(pAnimLayer,"Y");
	FbxAnimCurve* lAnimCurveZ = rotation.GetCurve(pAnimLayer,"Z");


    int xKeyCount = lAnimCurveX ? lAnimCurveX->KeyGetCount() : 0;
    int yKeyCount = lAnimCurveY ? lAnimCurveY->KeyGetCount() : 0;
    int zKeyCount = lAnimCurveZ ? lAnimCurveZ->KeyGetCount() : 0;

	FbxTime   lKeyTime;
	int     lCount;
	// NOTE(review): lX/lY/lZKeyTime keep their previous value (default-
	// constructed on the first pass) when a curve has fewer keys than the
	// longest one — confirm this carry-over is intended.
	FbxTime lXKeyTime,lYKeyTime,lZKeyTime;
	for(lCount = 0; lCount < max(max(xKeyCount,yKeyCount),zKeyCount); lCount++)
	{
		if(lCount<xKeyCount){
			lXKeyTime  = lAnimCurveX->KeyGetTime(lCount);
		}
		if(lCount<yKeyCount){
			lYKeyTime  = lAnimCurveY->KeyGetTime(lCount);
		}
		if(lCount<zKeyCount){
			lZKeyTime  = lAnimCurveZ->KeyGetTime(lCount);
		}
		lKeyTime = min(min(lXKeyTime,lYKeyTime),lZKeyTime);
		// NOTE(review): this overwrites the min(...) computed above, so the
		// sampling times come from the X curve only; looks like leftover
		// debug code — confirm which of the two lines is intended.
		lKeyTime = lXKeyTime;

		// Sample the full local transform at this time and keep only the
		// rotation part of its decomposition.
		FbxAMatrix & matrix = fbxNode->EvaluateLocalTransform(lKeyTime);
		ofxFBXKey<ofQuaternion> key;
		ofVec3f t,s;
		ofQuaternion so;
		ofMatrix4x4 m = toOf(matrix);
		m.decompose(t,key.value,s,so);
		key.timeMillis = lKeyTime.GetMilliSeconds();
		node.rotationKeys.push_back(key);
	}
}
예제 #9
0
std::vector<ofPolyline> Clipper::getOffsets(const std::vector<ofPolyline>& polylines,
                                            double offset,
                                            ClipperLib::JoinType jointype,
                                            ClipperLib::EndType endtype,
                                            double miterLimit,
                                            double arcTolerance,
                                            ClipperLib::cInt scale)
{
    offset *= scale;
    miterLimit *= scale;
    arcTolerance *= scale;

    ClipperLib::Paths out;

    ClipperLib::ClipperOffset clipperOffset(miterLimit, arcTolerance);
    clipperOffset.AddPaths(toClipper(polylines, scale), jointype, endtype);
    clipperOffset.Execute(out, offset);

    return toOf(out, true, scale);
}
예제 #10
0
// Extract the translation animation of an FBX property: record the rest
// position on `node`, then copy every key (value + time in milliseconds)
// of the X/Y/Z curves into node.xKeys / yKeys / zKeys.
void ofxFBXScene::parsePositionCurve(ofxFBXNode & node, FbxAnimLayer * pAnimLayer, FbxPropertyT<FbxDouble3> &position){
	node.originalPosition = toOf(position.Get());
	node.getNode().setPosition(node.originalPosition);
	ofLogVerbose("ofxFBXScene") << "original position " << node.originalPosition << endl;

	// No animation curve on this property -> nothing more to parse.
	if(!position.GetCurve(pAnimLayer)) return;
	FbxAnimCurve* curveX = position.GetCurve(pAnimLayer,"X");
	FbxAnimCurve* curveY = position.GetCurve(pAnimLayer,"Y");
	FbxAnimCurve* curveZ = position.GetCurve(pAnimLayer,"Z");

	const int numXKeys = curveX ? curveX->KeyGetCount() : 0;
	const int numYKeys = curveY ? curveY->KeyGetCount() : 0;
	const int numZKeys = curveZ ? curveZ->KeyGetCount() : 0;

	for(int k = 0; k < numXKeys; k++){
		ofxFBXKey<float> key;
		key.value = curveX->KeyGetValue(k);
		key.timeMillis = curveX->KeyGetTime(k).GetMilliSeconds();
		node.xKeys.push_back(key);
	}
	for(int k = 0; k < numYKeys; k++){
		ofxFBXKey<float> key;
		key.value = curveY->KeyGetValue(k);
		key.timeMillis = curveY->KeyGetTime(k).GetMilliSeconds();
		node.yKeys.push_back(key);
	}
	for(int k = 0; k < numZKeys; k++){
		ofxFBXKey<float> key;
		key.value = curveZ->KeyGetValue(k);
		key.timeMillis = curveZ->KeyGetTime(k).GetMilliSeconds();
		node.zKeys.push_back(key);
	}
}
예제 #11
0
glm::vec2 ofxChipmunk::Body::getPosition(){
	// Fetch the chipmunk body's position and convert it to a glm vector.
	const auto chipmunkPos = cpBodyGetPosition(body);
	return toOf(chipmunkPos);
}
예제 #12
0
void ofApp::update() {
    // Per-frame tracking update: grab a new kinect frame, extract user/blob
    // contours, convert them to calibrated point lists, and keep the `users`
    // map (id/label -> Contour*) in sync with what is currently visible.
    contourFinder.setMinArea(minArea);
    contourFinder.setMaxArea(maxArea);
    
    kinect.update();
    
    // No new frame: still run cleanup, then bail.
    if(!kinect.isNewFrame()) {
        clean();
        return;
    }
    
    depthPixels = kinect.getDepthRawPixels();
    vector<int> eligibleUsers; // ids/labels observed this frame
    
    if (useUserImage)
    {
        // OpenNI user-tracking path: contours come from each user's mask image.
        int numUsers = kinect.getNumTrackedUsers();
        for (int i=0; i<numUsers; i++)
        {
            ofxOpenNIUser & user = kinect.getTrackedUser(i);
            if (!user.getMaskPixels().isAllocated()) {continue;}
            
            user.setMaskPixelFormat(OF_PIXELS_MONO);
            grayImage.setFromPixels(user.getMaskPixels());
            contourFinder.findContours(grayImage);
            
            if (contourFinder.size() == 0) {continue;}
            eligibleUsers.push_back(user.getXnID());
            
            vector<ofVec2f> calibratedPoints;
            getCalibratedContour(depthPixels, contourFinder.getContour(0), calibratedPoints);
            
            // Create or refresh this user's Contour entry from contour 0.
            if (!users.count(user.getXnID())) {
                users[user.getXnID()] = new Contour(calibratedPoints, toOf(contourFinder.getCenter(0)), user.getXnID());
            }
            else {
                users[user.getXnID()]->setPoints(calibratedPoints, toOf(contourFinder.getCenter(0)));
            }
            
            // Prefer whichever contour in this mask has the most points.
            for (int j=1; j<contourFinder.size(); j++) {
                if (contourFinder.getContour(j).size() > users[user.getXnID()]->getNumPoints()) {
                    getCalibratedContour(depthPixels, contourFinder.getContour(j), calibratedPoints);
                    users[user.getXnID()]->setPoints(calibratedPoints, toOf(contourFinder.getCenter(j)));
                }
            }
        }
    }
    else
    {
        // Raw-depth path: band-pass threshold the depth image (near AND far),
        // then track every resulting blob by its contourFinder label.
        grayImage.setFromPixels(kinect.getDepthRawPixels());
        grayThreshNear = grayImage;
        grayThreshFar = grayImage;
        grayThreshNear.threshold(nearThreshold, true);
        grayThreshFar.threshold(farThreshold);
        cvAnd(grayThreshNear.getCvImage(), grayThreshFar.getCvImage(), grayImage.getCvImage(), NULL);
        grayImage.flagImageChanged();
        
        // determine found contours
        contourFinder.findContours(grayImage);
        
        for(int i = 0; i < contourFinder.size(); i++) {
            vector<cv::Point> points = contourFinder.getContour(i);
            int label = contourFinder.getLabel(i);
            eligibleUsers.push_back(label);
            
            vector<ofVec2f> calibratedPoints;
            getCalibratedContour(depthPixels, points, calibratedPoints);
            if (!users.count(label)) {
                users[label] = new Contour(calibratedPoints, toOf(contourFinder.getCenter(i)), label);
            }
            else {
                users[label]->setPoints(calibratedPoints, toOf(contourFinder.getCenter(i)));
            }
        }
    }
    
    // get rid of old contours: delete every entry whose id was not seen this
    // frame (post-increment on erase keeps the iterator valid).
    map<int, Contour*>::iterator itu = users.begin();
    while (itu != users.end()) {
        bool found = false;
        for (auto e : eligibleUsers) {
            if (e == itu->first) {
                found = true;
            }
        }
        if (!found) {
            delete itu->second;
            users.erase(itu++);
        }
        else {
            ++itu;
        }
    }
    
    clean();
}
예제 #13
0
// Build an ofxFBXCamera from an FBX camera node: animation curves, look-at
// target, clip planes, aspect/aperture reconciliation and field of view.
// Returns a pointer to the camera appended to camerasList.
ofxFBXNode * ofxFBXScene::parseCameraInfo(FbxNode* pNode, FbxAnimLayer * pAnimLayer){
    // NOTE(review): GetCamera() is dereferenced unchecked — callers must only
    // pass nodes that actually carry a camera attribute; confirm.
    FbxCamera* lCamera = pNode->GetCamera();

	ofxFBXCamera camera;

	camera.nodeName = lCamera->GetName();

	parsePositionCurve(camera,pAnimLayer,pNode->LclTranslation);

	parseScaleCurve(camera,pAnimLayer,pNode->LclScaling);
	//camera.originalScale.set(1,1,1);

	parseRotationCurve(camera,pAnimLayer,pNode,pNode->LclRotation);

	camera.setPosition(camera.originalPosition);

	// Look-at target: use the explicit target node if one exists, otherwise
	// fall back to the camera's interest position.
	if(pNode->GetTarget()){
		camera.target = ofxFBXNodePosition(pNode->GetTarget());
		//TODO: process lookAt animation
	}else{
		camera.target = toOf(lCamera->InterestPosition.Get());
	}


	float lNearPlane = lCamera->GetNearPlane();
	float lFarPlane = lCamera->GetFarPlane();

	//Get global scaling.
	FbxVector4 lCameraScaling = pNode->Scaling.Get();//GetGlobalPosition(pNode, 0).GetS();
	static const int  FORWARD_SCALE = 2;

	//scaling near plane and far plane
	//lNearPlane *= lCameraScaling[FORWARD_SCALE];
	//lFarPlane *= lCameraScaling[FORWARD_SCALE];


	// Resolve the aspect ratio (width : height) according to the camera's
	// aspect-ratio mode.
	FbxCamera::EAspectRatioMode lCamAspectRatioMode = lCamera->GetAspectRatioMode();
	double lAspectX = lCamera->AspectWidth.Get();
	double lAspectY = lCamera->AspectHeight.Get();
	double lAspectRatio = 1.333333;
	switch( lCamAspectRatioMode)
	{
	case FbxCamera::eWindowSize:
		lAspectRatio = lAspectX / lAspectY;
		break;
	case FbxCamera::eFixedRatio:
		lAspectRatio = lAspectX;

		break;
	case FbxCamera::eFixedResolution:
		lAspectRatio = lAspectX / lAspectY * lCamera->GetPixelRatio();
		break;
	case FbxCamera::eFixedWidth:
		lAspectRatio = lCamera->GetPixelRatio() / lAspectY;
		break;
	case FbxCamera::eFixedHeight:
		lAspectRatio = lCamera->GetPixelRatio() * lAspectX;
		break;
	default:
		break;

	}

	//get the aperture ratio
	double lFilmHeight = lCamera->GetApertureHeight();
	double lFilmWidth = lCamera->GetApertureWidth() * lCamera->GetSqueezeRatio();
	//here we use Height : Width
	double lApertureRatio = lFilmHeight / lFilmWidth;


	//change the aspect ratio to Height : Width
	lAspectRatio = 1 / lAspectRatio;
	//revise the aspect ratio and aperture ratio so they agree, depending on
	//how the film gate is fitted to the resolution gate
	FbxCamera::EGateFit lCameraGateFit = lCamera->GateFit.Get();
	switch( lCameraGateFit )
	{

	case FbxCamera::eFitFill:
		if( lApertureRatio > lAspectRatio)  // the same as eHORIZONTAL_FIT
		{
			lFilmHeight = lFilmWidth * lAspectRatio;
			lCamera->SetApertureHeight( lFilmHeight);
			lApertureRatio = lFilmHeight / lFilmWidth;
		}
		else if( lApertureRatio < lAspectRatio) //the same as eVERTICAL_FIT
		{
			lFilmWidth = lFilmHeight / lAspectRatio;
			lCamera->SetApertureWidth( lFilmWidth);
			lApertureRatio = lFilmHeight / lFilmWidth;
		}
		break;
	case FbxCamera::eFitVertical:
		lFilmWidth = lFilmHeight / lAspectRatio;
		lCamera->SetApertureWidth( lFilmWidth);
		lApertureRatio = lFilmHeight / lFilmWidth;
		break;
	case FbxCamera::eFitHorizontal:
		lFilmHeight = lFilmWidth * lAspectRatio;
		lCamera->SetApertureHeight( lFilmHeight);
		lApertureRatio = lFilmHeight / lFilmWidth;
		break;
	case FbxCamera::eFitStretch:
		lAspectRatio = lApertureRatio;
		break;
	case FbxCamera::eFitOverscan:
		if( lFilmWidth > lFilmHeight)
		{
			lFilmHeight = lFilmWidth * lAspectRatio;
		}
		else
		{
			lFilmWidth = lFilmHeight / lAspectRatio;
		}
		lApertureRatio = lFilmHeight / lFilmWidth;
		break;
	case FbxCamera::eFitNone:
	default:
		break;
	}
	//change the aspect ratio to Width : Height
	lAspectRatio = 1 / lAspectRatio;

	// Derive horizontal and vertical FOV from whichever aperture mode the
	// camera uses (the other axis is computed from the aperture ratio).
	double lFieldOfViewX = 0.0;
	double lFieldOfViewY = 0.0;
	if ( lCamera->GetApertureMode() == FbxCamera::eVertical)
	{
		lFieldOfViewY = lCamera->FieldOfView.Get();
		lFieldOfViewX = VFOV2HFOV( lFieldOfViewY, 1 / lApertureRatio);
	}
	else if (lCamera->GetApertureMode() == FbxCamera::eHorizontal)
	{
		lFieldOfViewX = lCamera->FieldOfView.Get(); //get HFOV
		lFieldOfViewY = HFOV2VFOV( lFieldOfViewX, lApertureRatio);
	}
	else if (lCamera->GetApertureMode() == FbxCamera::eFocalLength)
	{
		lFieldOfViewX = lCamera->ComputeFieldOfView(lCamera->FocalLength.Get());    //get HFOV
		lFieldOfViewY = HFOV2VFOV( lFieldOfViewX, lApertureRatio);
	}
	else if (lCamera->GetApertureMode() == FbxCamera::eHorizAndVert) {
		lFieldOfViewX = lCamera->FieldOfViewX.Get();
		lFieldOfViewY = lCamera->FieldOfViewY.Get();
	}


	//revise the Perspective since we have film offset
	// NOTE(review): the film offsets are computed but never applied to the
	// camera below — confirm whether this is intentionally unfinished.
	double lFilmOffsetX = lCamera->FilmOffsetX.Get();
	double lFilmOffsetY = lCamera->FilmOffsetY.Get();
	lFilmOffsetX = 0 - lFilmOffsetX / lFilmWidth * 2.0;
	lFilmOffsetY = 0 - lFilmOffsetY / lFilmHeight * 2.0;

	// Only the vertical FOV is consumed by ofCamera.
	camera.setFov(lFieldOfViewY);
	camera.setNearClip(lNearPlane);
	camera.setFarClip(lFarPlane);


	// NOTE(review): returning a pointer into camerasList — if the container
	// reallocates on a later push_back, earlier pointers dangle; confirm the
	// container type guarantees stability.
	camerasList.push_back(camera);
	return &camerasList.back();


}
	// Bounding rectangle of the i-th detected object (cv::Rect -> ofRectangle).
	// No bounds check: caller must ensure i < number of detected objects.
	ofRectangle ObjectFinder::getObject(unsigned int i) const {
		return toOf(objects[i]);
	}
	// Temporally smoothed bounding rectangle of the i-th detected object,
	// looked up in the tracker by the object's label.
	ofRectangle ObjectFinder::getObjectSmoothed(unsigned int i) const {
		return toOf(tracker.getSmoothed(getLabel(i)));
	}
예제 #16
0
	ofPolyline toOf(cv::RotatedRect rect) {
		// Extract the rect's four corner points and convert them directly
		// into an ofPolyline.
		vector<cv::Point2f> corners(4);
		rect.points(&corners[0]);
		return toOf(corners);
	}
예제 #17
0
//--------------------------------------------------------------
void ofApp::setup() {
    ofSetVerticalSync(true);
    //ofSetWindowShape(640, 640);
    ofSetFullscreen(true);
    
    // Box2d
    box2d.init();
    box2d.setGravity(0, 10);
    box2d.createGround();
    box2d.setFPS(30.0);
    box2d.createBounds(ofRectangle(0, 0, ofGetWidth(), ofGetHeight()));
    
    // mask
    ofShader shader;
    ofFbo mask;
    ofFbo final;
    ofPixels pixels;
    
    image.load("jobs.jpg");
    
    //sigma: to smooth the image.
    //k: constant for treshold function.
    //min: minimum component size (enforced by post-processing stage).
    // segment the image
    segmentation.sigma = 1.0;
    segmentation.k = 300; // how small the chunks
    segmentation.min = 2000;
    segmentation.segment(image);
    segmentedImage.setFromPixels(segmentation.getSegmentedPixels());
    segmentedImage.update();
    
    // grab the contours of the individual segments
    contourFinder.setMinArea(1000);
    contourFinder.setMaxArea(image.getWidth() * image.getHeight());
    
    int i = 0;
    
    // for each segment found, create a TexturedBox2dPolygon
    for (int c = 0; c < segmentation.numSegments; c++)
    {
        //cout << segmentation.getSegmentMask(c);
        contourFinder.findContours(segmentation.getSegmentMask(c));
        if (contourFinder.getBoundingRect(0).width * contourFinder.getBoundingRect(0).height > image.getWidth()*image.getHeight()*0.15) {
            continue;
        }
        
        // get contour points
        ofPolyline points;
        for (auto p : contourFinder.getContour(0)) {
            points.addVertex(toOf(p));
        }
        //points.simplify(2);   // this seems to cause box2d to crash sometimes
        
        // get bounding box
        cv::Rect box = contourFinder.getBoundingRect(0);
        ofImage segmentImage;
        segmentImage.setFromPixels(image);
        segmentImage.crop(box.x, box.y, box.width, box.height);
        
        
        /*
        //Save the segmentImages
        std::string name = "test";
        name += ofToString(i) + ".png";
        //segmentImage.save(name) ;
        i++ ;*/
        
        mask.allocate(box.width, box.height);
        final.allocate(box.width, box.height);
        shader.load("standard.vert", "alphamask.frag");
        
        // make mask from contour points
        mask.begin();
        ofClear(0,0);
        ofSetColor(255);
        ofBeginShape();
        vector<ofPoint> & vertices = points.getVertices();
        for (int i=0; i<vertices.size(); i++) {
            ofVertex(vertices[i].x - box.x, vertices[i].y - box.y);
        }
        ofEndShape();
        mask.end();
        
        // make final masked texture and read into pixels
        final.begin();
예제 #18
0
void ofxPclStitcherDevice::processCloud() {
	if(debug && !doDraw)
		return;

	//Z filtering
	if(doColors) {
		passThroughColor.setFilterFieldName ("z");
		passThroughColor.filter (*cloudColor);
		passThroughColor.setFilterLimits (0.0, cropZ/scale);
		passThroughColor.setInputCloud (cloudColor);
	} else {
		ofxPclCloud cloud_temp;
		passThrough.setInputCloud (cloud);
		passThrough.setFilterFieldName ("z");
		passThrough.setFilterLimits (0.0, cropZ/scale);
		passThrough.filter (cloud_temp);
		pcl::copyPointCloud(cloud_temp, *cloud);
		//
	}


	//do downsampling
	if(downsample) {
		if(doColors) {
			gridColor.setLeafSize(downsampleSize, downsampleSize, downsampleSize);
			gridColor.setInputCloud(cloudColor);
			gridColor.filter(*cloudColor);
		} else {
			grid.setLeafSize(downsampleSize, downsampleSize, downsampleSize);
			grid.setInputCloud(cloud);
			grid.filter(*cloud);
		}
	}

	//apply matrix transforms
	/*
	ofQuaternion quat;
	ofVec3f Znormal(0, 0, 1);
	ofVec3f Xnormal(1, 0, 0);
	ofVec3f Ynormal(1, 0, 1);
	ofQuaternion qr (rotationZ, Znormal); // quat roll.
	ofQuaternion qp (rotationX, Xnormal); // quat pitch.
	ofQuaternion qh (rotationY, Ynormal); // quat heading or yaw.
	quat = qr * qp * qh;
	*/
	dNode.resetTransform();
	dNode.tilt(rotationX);
	dNode.pan(rotationY);
	dNode.roll(rotationZ);
	dNode.setPosition(translateX, translateY, translateZ);

	ofMatrix4x4 matrix = dNode.getGlobalTransformMatrix().getInverse();
	matrix.setTranslation(translateX/scale, translateY/scale, translateZ/scale);
	//matrix.scale(-1, -1, 1);

	/*
	if(debug) {
		ofMatrix4x4 matrixNode;
		matrixNode.setRotate(quat);
		matrixNode.translate(translateX, translateY, translateZ);
		matrixNode.scale(-1, -1, 1);
		dNode.setTransformMatrix(matrixNode);
	}
	*/

	//TODO: rotation
	//matrix.scale(1, -1, -1);

	Eigen::Matrix4f eigenMat;
	eigenMat << matrix(0,0), matrix(1,0), matrix(2,0), matrix(3,0),
	         matrix(0,1), matrix(1,1), matrix(2,1), matrix(3,1),
	         matrix(0,2), matrix(1,2), matrix(2,2), matrix(3,2),
	         matrix(0,3), matrix(1,3), matrix(2,3), matrix(3,3);

	if(doColors)
		pcl::transformPointCloud(*cloudColor, *cloudColor, eigenMat);
	else
		pcl::transformPointCloud(*cloud, *cloud, eigenMat);

	if(debug) {
		if(doColors) {
			toOf(cloudColor, mesh, scale, scale, scale, true, color);
		} else {
			toOf(cloud, mesh, scale, scale, scale, color);
		}
	}
}
예제 #19
0
//--------------------------------------------------------------
// Draw pass: GUI previews in the main window, then an alpha mask FBO built
// by subtracting the projected user contours, finally composited (twice)
// with the background image into the second window and the main screen.
void ofApp::draw(){
    
    // GUI: kinect feed (top-left), thresholded image (top-right),
    // contour overlay (bottom-left)
    ofBackground(0);
    ofSetColor(255);
    ofPushMatrix();
    kinect.draw(0, 0);
    ofTranslate(640, 0);
    grayImage.draw(0, 0);
    ofTranslate(-640, 480);
    contourFinder.draw();
    // NOTE(review): this translation is immediately discarded by ofPopMatrix
    ofTranslate(640, 0);
    ofPopMatrix();

    
    //Alpha masking code: fill the fbo black, then subtract the projected
    //contour shapes so they become transparent holes
    fbo.begin();
    
    ofClear(0, 0);
    ofSetColor(0, 255);
    ofRect(0, 0, fbo.getWidth(), fbo.getHeight());
    ofEnableBlendMode(OF_BLENDMODE_SUBTRACT);
    ofSetColor(255);
    
    
    //countor finder has a thing called the tracker - gives you data associated with the countour
    RectTracker& tracker = contourFinder.getTracker();
    
    for(int i = 0; i < contourFinder.size(); i++) {
        // get contour, label, center point, and age of contour
        vector<cv::Point> points = contourFinder.getContour(i);//getting the counttour points (green line around blob)
        int label = contourFinder.getLabel(i);//the label is keeping track of blobs over frames. trying to identify a single object from frame to frame
        ofPoint center = toOf(contourFinder.getCenter(i));//center point of the point array
        int age = tracker.getAge(label);//not really using this so much
        
        // map contour using calibration and draw to main window
        //drawing the countours as they are mapped
        ofBeginShape();
        ofFill();
        ofSetColor(blobColors[label % 12]); //mod operator gives a persistant color
        for (int j=0; j<points.size(); j++) { //loop through all points
            //world cordinate is a 3D cordinate inside the kinect depth cloud
            //this is a standard way of getting kinect info
            //pp = pixesl point, coresponding pixel between 0 and 1 for both x and y. Scale it to whatever the screen size is. ofMap scales it.
            ofVec3f wp = kinect.getWorldCoordinateAt(points[j].x, points[j].y);
            ofVec2f pp = kpt.getProjectedPoint(wp);
            ofVertex(
                     ofMap(pp.x, 0, 1, 0, secondWindow.getWidth()),
                     ofMap(pp.y, 0, 1, 0, secondWindow.getHeight())
                     );
        }
        ofEndShape();
    }

    ofDisableBlendMode();
    
    fbo.end();
    
    // MAIN WINDOW: composite the mask fbo over the second window
    secondWindow.begin();
    
    ofEnableBlendMode(OF_BLENDMODE_ALPHA);
    
    
//        
//        shader.begin();
//        shader.setUniform2f("resolution", ofGetWidth(), ofGetHeight());
//        ofRect(0, 0, ofGetWidth(), ofGetHeight());
//        
//        shader.end();
    

    
    ofSetColor(255);
    fbo.draw(0,0, secondWindow.getWidth(),secondWindow.getHeight());
    ofDisableBlendMode();
    
    //box2d draw
    // some circles :)
//    for (int i=0; i<circles.size(); i++) {
//        ofFill();
//        ofSetColor(255);
//        //ofSetHexColor(0xc0dd3b);
//        circles[i].get()->draw();
//    }

    
    secondWindow.end();
  
    
    //////////////////////////////////////
    //Screen Effect **** DONT FORGET TO UNCOMMENT SETUP SCREEN TOGGLE LINE

    // Same composite drawn to the primary screen: background image with the
    // mask fbo alpha-blended on top.
    ofEnableBlendMode(OF_BLENDMODE_ALPHA);

//    shader.begin();
//    shader.setUniform2f("resolution", ofGetWidth(), ofGetHeight());
//    ofRect(0, 0, ofGetWidth(), ofGetHeight());
//    
//    shader.end();
    
    ofSetColor(255);
    image.draw(0,0,ofGetWidth(), ofGetHeight());
    fbo.draw(0,0);
    ofDisableBlendMode();
    
    
    //////////////////////////////////////

}
예제 #20
0
ofRectangle Clipper::getBounds(ClipperLib::cInt scale) const
{
    // Fetch the clip bounds in Clipper's integer space, then rescale them
    // back into an openFrameworks rectangle.
    const auto clipperBounds = GetBounds();
    return toOf(clipperBounds, scale);
}
예제 #21
0
		//----------
		// Per-pixel user mask from the body tracker, converted to ofPixels.
		// NOTE(review): the `copy` flag is ignored — toOf()'s result is
		// returned regardless; confirm whether a zero-copy path was intended.
		ofPixels Skeleton::getUserMask(bool copy) const {
			return toOf(this->bodyTracker->user_mask());
		}
예제 #22
0
		//----------
		// Body-tracker label image converted to ofPixels.
		// NOTE(review): the `copy` flag is ignored — toOf()'s result is
		// returned regardless; confirm whether a zero-copy path was intended.
		ofPixels Skeleton::getLabelsImage(bool copy) const {
			return toOf(this->bodyTracker->labels());
		}
예제 #23
0
파일: 3Dscan.cpp 프로젝트: imclab/OpenC3DS
//--------------------------------------------------------------
// One scan step: diff the two input frames (TaL appears to hold the
// laser-on frame and TsL the laser-off frame — confirm against the caller),
// threshold the difference in HSV, extract sub-pixel line positions into
// cam->p, and write the annotated gray preview back through `grislaser`.
void scan(Cam *cam, ofImage *grislaser, ofImage *TaL, ofImage *TsL){

    Mat image1;
    Mat Laser1;
    Mat Tot, gris, grisc;

    Mat HSV;
    Mat threshold1;

    Mat tt1, tt2;

    // Clone the inputs so this function owns its pixel buffers.
    tt1 = toCv(*TaL).clone();
    Laser1 = tt1.clone();

    tt2 = toCv(*TsL).clone();
    Tot = tt2.clone();
    Mat th1;
    Mat image2;

    // The frame difference isolates the laser line; keep only pixels inside
    // the per-camera HSV channel limits.
    absdiff(Laser1, Tot, image1);
    cvtColor(image1, HSV, CV_BGR2HSV);
    inRange(HSV, Scalar(cam->Bi, cam->Gi, cam->Ri), Scalar(cam->Bs, cam->Gs, cam->Rs), threshold1);
    th1 = threshold1.clone();
    image2 = image1.clone();
    // NOTE(review): a 1x1 Gaussian kernel is effectively a no-op blur.
    GaussianBlur(threshold1, th1, cv::Size(1,1), 0,0);
    GaussianBlur(image2, image1, cv::Size(cam->blur_ksizew, cam->blur_ksizeh), cam->blur_sigmax, cam->blur_sigmay);
    cam_cap_subpixel(cam, image1, threshold1);

    cvtColor(image1, gris, CV_BGR2GRAY);
    cvtColor(gris, grisc, CV_GRAY2BGR);

    // Plot the detected line points (cam->p) onto the gray preview.
    for(int i=0; i<cam->resy; i++){
        cv::Point paux1;
        paux1.x = (int)cam->p[i].x;
        paux1.y = (int)cam->p[i].y;

        line(grisc, paux1, paux1, Scalar(255,0,0), 1,8,0);
    }

    // Convert the results back into ofImages for the caller.
    ofImage gl,L1,Tt;

    toOf(grisc, gl);
    gl.update();

    *grislaser = gl;

    toOf(Laser1, L1);
    L1.update();

    *TaL = L1;

    toOf(Tot, Tt);
    Tt.update();

    *TsL = Tt;
}
예제 #24
0
파일: 3Dscan.cpp 프로젝트: imclab/OpenC3DS
//--------------------------------------------------------------
// Locates the container corner in the scanned laser profile: fits a
// least-squares line to the points above and below the leftmost point
// (the corner), intersects the two lines, draws the corner on *image,
// and logs the derived container position and truck angle.
//   cam   - scan state; cam.p holds one laser point per scanline,
//           with x == 1024 marking "no point detected" on that line
//   image - in/out: image the detected corner is drawn onto
// Fixes vs. the previous revision: the least-squares accumulators
// scx/scy/scxy/scxx were read (+=) while uninitialized (undefined
// behavior), delta_alfa1 could be read uninitialized when
// delta_alfa == 0, and the write-only VLA arrays recta_sx/ix/sy/iy
// (non-standard C++) plus unused meanc_x/meanc_y were removed.
void contenidor(Cam cam, ofImage *image){

    float minx = 1024;
    int ixmin = 0;
    int deltak = 10;
    float xco, yco;
    cv::Point punt;
    // Least-squares accumulators — must start at zero.
    double scx = 0, scxx = 0, scy = 0, scxy = 0;
    double varcx, covc;
    double paramcs0, paramcs1, paramci0, paramci1;
    float Xco, Yco, Xc1, Yc1, Zco, Zc1;
    double delta_alfa, delta_alfa1 = 0;
    Mat ima_aux = toCv(*image);

    // Find the scanline whose laser point is leftmost (the corner).
    for(int i=0; i<=cam.resy-1; i++){
        if(cam.p[i].x < minx){
            ixmin = i;
            minx = cam.p[i].x;
        }
    }

    // Accumulate the valid points above the corner (skip deltak lines
    // around it to avoid mixing the two edges).
    int j = 0;
    for(int i=0; i<=ixmin-deltak; i++){
        if(cam.p[i].x != 1024){
            j = j + 1;
            scx += cam.p[i].x;
            scy += cam.p[i].y;
            scxy += cam.p[i].x * cam.p[i].y;
            scxx += cam.p[i].x * cam.p[i].x;
        }
    }

    double mean_cx = scx / j;
    double mean_cy = scy / j;

    varcx = scxx - scx * mean_cx;
    covc = scxy - scx * mean_cy;

    // check for zero varx
    paramcs0 = covc / varcx;
    paramcs1= mean_cy - paramcs0 * mean_cx;


    scx = scy = scxy = scxx = 0;

    // Accumulate the valid points below the corner.
    int k = 0;
    for(int i=ixmin+deltak; i<=cam.resy-1; i++){
        if(cam.p[i].x != 1024){
            k = k + 1;
            scx += cam.p[i].x;
            scy += cam.p[i].y;
            scxy += cam.p[i].x * cam.p[i].y;
            scxx += cam.p[i].x * cam.p[i].x;
        }
    }

    mean_cx = scx / k;
    mean_cy = scy / k;

    varcx = scxx - scx * mean_cx;
    covc = scxy - scx * mean_cy;

    // check for zero varx
    paramci0 = covc / varcx;
    paramci1= mean_cy - paramci0 * mean_cx;

    // Intersect the two fitted lines: that intersection is the corner.
    xco = (-paramcs1+paramci1) / (paramcs0-paramci0);
    yco = paramcs0 * xco + paramcs1;

    punt.x = xco;
    punt.y = yco;

    circle(ima_aux, punt, 10, Scalar( 20,255,255 ),2, 1, 0);

    ofImage im;

    toOf(ima_aux,im);
    im.update();

    *image = im;

    // Only derive 3D pose when both line fits had at least one point.
    if( (j!=0)&&(k!=0) ){
        cam_dis(cam,1, xco,yco,&Xco,&Yco);
        cam_dis(cam,1, xco+50,paramci0*(xco+50)+paramci1,&Xc1,&Yc1);

        // Vertical view angle of the corner row, in radians.
        delta_alfa=(PI/180.0f) * cam.alfa * (1-((float)yco/(float)(cam.resy/2.0f)));

        if(delta_alfa != 0){
            delta_alfa1 = sqrt(Xco*Xco+Yco*Yco)/cos(delta_alfa);
        }

        Zco = delta_alfa1 * sin(delta_alfa);

        delta_alfa = (PI/180.0f) * cam.alfa * (1-((float)(paramci0*(xco+50)+paramci1)/(float)(cam.resy/2.0f)));

        if (delta_alfa != 0){
            delta_alfa1 = sqrt(Xc1*Xc1+Yc1*Yc1)/cos(delta_alfa);
        }

        Zc1 = delta_alfa1 * sin(delta_alfa);

        // Angle of the lower container edge relative to the scanner.
        float anglec;

        if(Zc1-Zco != 0){
            anglec = -1 * atan((Yc1-Yco)/(Zc1-Zco));
        }
        else{
            anglec = 0.0f;
        }

        ofLogError() << "3Dscan::contenidor: Posició Vertex: " << Yco << "," << Zco << endl;
        ofLogError() << "3Dscan::contenidor: Posició centre Contenidor: " << Yco + 25 * sin(anglec) + 25 * cos(anglec) << "," << Zco - 25 * cos(anglec) + 25 * sin(anglec) << endl;
        ofLogError() << "3Dscan::contenidor: Angle camió: " << (180.0f/PI) * anglec << endl;

    } // end if( (j!=0)&&(k!=0) )
}
예제 #25
0
	ofPolyline convexHull(const ofPolyline& polyline) {
		// Compute the 2D convex hull of the polyline's vertices via OpenCV
		// and hand it back as an ofPolyline.
		vector<cv::Point2f> inputPoints = toCv(polyline);
		vector<cv::Point2f> hullPoints;
		convexHull(Mat(inputPoints), hullPoints);
		return toOf(hullPoints);
	}
예제 #26
0
//--------------------------------------------------------------
// Applies the mesh's FBX blend-shape (morph target) deformers to aMesh
// at the given animation time, accumulating each channel's weighted
// influence into the mesh vertices.
//   aMesh      - mesh whose vertices are modified in place
//   pTime      - animation time at which to evaluate the weight curves
//   pAnimLayer - animation layer holding the per-channel weight curves
// Fix vs. the previous revision: the shape-scope search read
// lFullWeights[lShapeIndex+1] on the last iteration, indexing one past
// the end of the full-weights array when lWeight exceeded the final
// full weight; the access is now bounds-guarded.
void ofxFBXMesh::computeBlendShapes( ofMesh* aMesh, FbxTime& pTime, FbxAnimLayer * pAnimLayer ) {
    int lBlendShapeDeformerCount = fbxMesh->GetDeformerCount(FbxDeformer::eBlendShape);
    for(int lBlendShapeIndex = 0; lBlendShapeIndex<lBlendShapeDeformerCount; ++lBlendShapeIndex) {
        FbxBlendShape* lBlendShape = (FbxBlendShape*)fbxMesh->GetDeformer(lBlendShapeIndex, FbxDeformer::eBlendShape);

        int lBlendShapeChannelCount = lBlendShape->GetBlendShapeChannelCount();
        for(int lChannelIndex = 0; lChannelIndex<lBlendShapeChannelCount; ++lChannelIndex) {
            FbxBlendShapeChannel* lChannel = lBlendShape->GetBlendShapeChannel(lChannelIndex);
            if(lChannel) {
                // Get the percentage of influence on this channel.
                FbxAnimCurve* lFCurve = fbxMesh->GetShapeChannel(lBlendShapeIndex, lChannelIndex, pAnimLayer);
                if (!lFCurve) continue;

                double lWeight          = lFCurve->Evaluate(pTime);

                int lShapeCount         = lChannel->GetTargetShapeCount();
                double* lFullWeights    = lChannel->GetTargetShapeFullWeights();

                // Find which pair of target shapes lWeight falls between.
                int lStartIndex = -1;
                int lEndIndex = -1;
                for(int lShapeIndex = 0; lShapeIndex<lShapeCount; ++lShapeIndex) {
                    if(lWeight > 0 && lWeight <= lFullWeights[0]) {
                        lEndIndex = 0;
                        break;
                    }
                    // Guard lShapeIndex+1: the unguarded read was an
                    // out-of-bounds access on the last iteration.
                    if(lShapeIndex + 1 < lShapeCount &&
                       lWeight > lFullWeights[lShapeIndex] && lWeight < lFullWeights[lShapeIndex+1]) {
                        lStartIndex = lShapeIndex;
                        lEndIndex = lShapeIndex + 1;
                        break;
                    }
                }

                FbxShape* lStartShape = NULL;
                FbxShape* lEndShape = NULL;
                if(lStartIndex > -1) {
                    lStartShape = lChannel->GetTargetShape(lStartIndex);
                }
                if(lEndIndex > -1) {
                    lEndShape = lChannel->GetTargetShape(lEndIndex);
                }

                // The weight percentage falls between the base geometry and
                // the first target shape.
                if(lStartIndex == -1 && lEndShape) {
                    float lEndWeight    = lFullWeights[0];
                    lWeight = (lWeight/lEndWeight);

                    cout << "updateMesh : weight = " << lWeight << endl;
                    for (int j = 0; j < aMesh->getNumVertices(); j++) {
                        // Add the influence of the shape vertex to the mesh vertex.
                        ofVec3f influence = (toOf(lEndShape->GetControlPoints()[j]) - original.getVertices()[j]) * lWeight;
                        aMesh->getVertices()[j] += influence;
                    }

                } else if(lStartShape && lEndShape) {
                    float lStartWeight  = lFullWeights[lStartIndex];
                    float lEndWeight    = lFullWeights[lEndIndex];
                    // Remap lWeight into [0,1] between the two target shapes.
                    lWeight = ofMap(lWeight, lStartWeight, lEndWeight, 0, 1, true);
                    cout << "updateMesh : weight = " << lWeight << " lStartWeight " << lStartWeight << " lEndWeight " << lEndWeight << endl;
                    for (int j = 0; j < aMesh->getNumVertices(); j++) {
                        // Add the influence of the shape vertex to the mesh vertex.
                        ofVec3f influence = (toOf(lEndShape->GetControlPoints()[j] - lStartShape->GetControlPoints()[j] )) * lWeight;
                        aMesh->getVertices()[j] += influence;
                    }
                }

            }
        }
    }
}
예제 #27
0
//--------------------------------------------------------------
// App setup: initializes Box2D, segments a map image into regions,
// and (per segment) builds a contour, crops the segment image, and
// masks it via an FBO + alpha-mask shader.
// NOTE(review): this listing is truncated here (the loop body continues
// past the visible end); the final masked-texture readback is not shown.
void ofApp::setup() {
    ofSetVerticalSync(true);
    ofSetWindowShape(640, 640);
    //ofSetFullscreen(true);
    
    // Box2d: world with downward gravity, a ground plane, and walls
    // around the window so bodies stay on screen.
    box2d.init();
    box2d.setGravity(0, 10);
    box2d.createGround();
    box2d.setFPS(30.0);
    box2d.createBounds(ofRectangle(0, 0, ofGetWidth(), ofGetHeight()));
 
    // mask
    // NOTE(review): these are locals — they go out of scope at the end of
    // setup(), so the FBOs/shader are re-allocated per segment below.
    ofShader shader;
    ofFbo mask;
    ofFbo final;
    ofPixels pixels;
    
    // load the image to break
    image.loadImage("Geo-Map--USA.jpg");

    // segment the image (graph-based segmentation parameters)
    segmentation.sigma = 1.0;
    segmentation.k = 300;
    segmentation.min = 2000;
    segmentation.segment(image);
    segmentedImage.setFromPixels(segmentation.getSegmentedPixels());
    segmentedImage.update();

    // grab the contours of the individual segments
    contourFinder.setMinArea(1000);
    contourFinder.setMaxArea(image.width * image.height);

    // for each segment found, create a TexturedBox2dPolygon
    for (int c = 0; c < segmentation.numSegments; c++)
    {
        contourFinder.findContours(segmentation.getSegmentMask(c));
        // Skip segments covering more than 15% of the image (background).
        if (contourFinder.getBoundingRect(0).width * contourFinder.getBoundingRect(0).height > image.getWidth()*image.getHeight()*0.15) {
            continue;
        }
        
        // get contour points
        ofPolyline points;
        for (auto p : contourFinder.getContour(0)) {
            points.addVertex(toOf(p));
        }
        //points.simplify(2);   // this seems to cause box2d to crash sometimes

        // get bounding box and crop the segment's patch out of the image
        cv::Rect box = contourFinder.getBoundingRect(0);
        ofImage segmentImage;
        segmentImage.setFromPixels(image);
        segmentImage.crop(box.x, box.y, box.width, box.height);
        
        mask.allocate(box.width, box.height);
        final.allocate(box.width, box.height);
        shader.load("standard.vert", "alphamask.frag");
        
        // make mask from contour points (white shape on cleared FBO),
        // shifted into the bounding box's local coordinates
        mask.begin();
        ofClear(0,0);
        ofSetColor(255);
        ofBeginShape();
        vector<ofPoint> & vertices = points.getVertices();
        for (int i=0; i<vertices.size(); i++) {
            ofVertex(vertices[i].x - box.x, vertices[i].y - box.y);
        }
        ofEndShape();
        mask.end();
        
        // make final masked texture and read into pixels
        final.begin();
예제 #28
0
 //------------------------------------------------------------------------
 // Processes one camera frame: runs haar / optical-flow trackers, builds a
 // thresholded difference image, finds blob contours, and updates (or
 // creates / removes) the tracked people. Returns the thresholded
 // difference image's pixels.
 // Fixes vs. the previous revision: the lowest.x/.y lower clamps assigned
 // p->highest.x/.y (copy-paste bug); haarROI.y's positive branch was
 // missing the haarTrackingScale factor; the haarROI width/height clamp
 // ternaries were inverted (they expanded past the haar image instead of
 // clamping to it).
 ofPixelsRef CvProcessor::process ( ofBaseImage & image ){
     if ( bTrackHaar ){
         processHaar( cameraBabyImage );
     }
     
     if ( bTrackOpticalFlow ){
         processOpticalFlow( cameraSmallImage );
     }
     
     differencedImage.setFromPixels(image.getPixelsRef());
     ofxCv::threshold(differencedImage, threshold);
     
     // find contours
     contourFinder.setFindHoles( bFindHoles );
     contourFinder.setMinArea( minBlobArea * tspsWidth * tspsHeight );
     contourFinder.setMaxArea( maxBlobArea * tspsWidth * tspsHeight );
     contourFinder.findContours( differencedImage );
     
     // update people
     RectTracker& rectTracker    = contourFinder.getTracker();
     cv::Mat cameraMat           = toCv(cameraImage);
     
     for(int i = 0; i < contourFinder.size(); i++){
         unsigned int id = contourFinder.getLabel(i);
         if(rectTracker.existsPrevious(id)) {
             CvPerson* p = (CvPerson *) getTrackedPerson(id);
             //somehow we are not tracking this person, safeguard (shouldn't happen)
             if(NULL == p){
                 ofLog(OF_LOG_WARNING, "Person::warning. encountered persistent blob without a person behind them\n");
                 continue;
             }
             p->oid = i; //hack ;(
             
             //update this person with new blob info
             // to-do: make centroid dampening dynamic
             p->update(true);
             
             //normalize simple contour (index renamed so it no longer
             //shadows the outer blob index i)
             for (int ci=0; ci<p->simpleContour.size(); ci++){
                 p->simpleContour[ci].x /= tspsWidth;
                 p->simpleContour[ci].y /= tspsHeight;
             }
             
             //find peak in blob (only useful with depth cameras)
             cv::Point minLoc, maxLoc;
             double minVal = 0, maxVal = 0;
             cv::Rect rect;
             rect.x      = p->boundingRect.x;
             rect.y      = p->boundingRect.y;
             rect.width  = p->boundingRect.width;
             rect.height = p->boundingRect.height;
             cv::Mat roiMat(cameraMat, rect);
             cv::minMaxLoc( roiMat, &minVal, &maxVal, &minLoc, &maxLoc, cv::Mat());
             
             // set depth
             p->depth = p->highest.z / 255.0f;
             
             // set highest and lowest points: x, y, VALUE stored in .z prop
             // ease vals unless first time you're setting them
             if ( p->highest.x == -1 ){
                 p->highest.set(  p->boundingRect.x + maxLoc.x,  p->boundingRect.y + maxLoc.y, maxVal);
                 p->lowest.set(  p->boundingRect.x + minLoc.x,  p->boundingRect.y + minLoc.y, minVal);
             } else {
                 p->highest.x = ( p->highest.x * .9 ) + ( p->boundingRect.x + maxLoc.x ) * .1;
                 p->highest.y = ( p->highest.y * .9 ) + ( p->boundingRect.y + maxLoc.y ) * .1;
                 p->highest.z = ( p->highest.z * .9) + ( maxVal ) * .1;
                 p->lowest.x = ( p->lowest.x * .9 ) + ( p->boundingRect.x + minLoc.x ) * .1;
                 p->lowest.y = ( p->lowest.y * .9 ) + ( p->boundingRect.y + minLoc.y ) * .1;
                 p->lowest.z = ( p->lowest.z * .9) + ( minVal ) * .1;
             }
             
             // clamp highest + lowest to the scene bounds
             p->highest.x = (p->highest.x > tspsWidth ? tspsWidth : p->highest.x);
             p->highest.x = (p->highest.x < 0 ? 0 : p->highest.x);
             p->highest.y = (p->highest.y > tspsHeight ? tspsHeight : p->highest.y);
             p->highest.y = (p->highest.y < 0 ? 0 : p->highest.y);
             
             p->lowest.x = (p->lowest.x > tspsWidth ? tspsWidth : p->lowest.x);
             p->lowest.x = (p->lowest.x < 0 ? 0 : p->lowest.x);   // was p->highest.x (copy-paste bug)
             p->lowest.y = (p->lowest.y > tspsHeight ? tspsHeight : p->lowest.y);
             p->lowest.y = (p->lowest.y < 0 ? 0 : p->lowest.y);   // was p->highest.y (copy-paste bug)
             
             // ROI for opticalflow, scaled into flow-image coordinates
             ofRectangle roi = p->getBoundingRectNormalized(tspsWidth, tspsHeight);
             roi.x *= flow.getWidth();
             roi.y *= flow.getHeight();
             roi.width *= flow.getWidth();
             roi.height *= flow.getHeight();                
             
             // sum optical flow for the person
             if ( bTrackOpticalFlow && bFlowTrackedOnce ){
                 // TO-DO!
                 p->opticalFlowVectorAccumulation = flow.getAverageFlowInRegion(roi);
             } else {
                 p->opticalFlowVectorAccumulation.x = p->opticalFlowVectorAccumulation.y = 0;
             }
             
             //detect haar patterns (faces, eyes, etc)
             if ( bTrackHaar ){
                 //region of interest: bounding rect expanded by the haar
                 //padding, scaled, and clamped to the haar image bounds
                 ofRectangle haarROI;
                 haarROI.x		= (p->boundingRect.x - haarAreaPadding/2) * haarTrackingScale > 0.0f ? (p->boundingRect.x - haarAreaPadding/2) * haarTrackingScale : 0.0;
                 haarROI.y		= (p->boundingRect.y - haarAreaPadding/2) * haarTrackingScale > 0.0f ? (p->boundingRect.y - haarAreaPadding/2) * haarTrackingScale : 0.0f; // was missing the scale factor
                 haarROI.width	= (p->boundingRect.width  + haarAreaPadding*2) * haarTrackingScale > cameraBabyImage.width ? cameraBabyImage.width : (p->boundingRect.width  + haarAreaPadding*2) * haarTrackingScale; // clamp (ternary was inverted)
                 haarROI.height	= (p->boundingRect.height + haarAreaPadding*2) * haarTrackingScale > cameraBabyImage.height ? cameraBabyImage.height : (p->boundingRect.height + haarAreaPadding*2) * haarTrackingScale; // clamp (ternary was inverted)
                 
                 bool haarThisFrame = false;
                 for(int j = 0; j < haarObjects.size(); j++) {
                     ofRectangle hr = toOf(haarObjects[j]);
                     
                     //accept the first haar hit fully contained in the ROI
                     if(hr.x > haarROI.x && hr.y > haarROI.y && hr.x+hr.width < haarROI.x+haarROI.width && hr.y+hr.height < haarROI.y+haarROI.height){
                         hr.x /= haarTrackingScale;
                         hr.y /= haarTrackingScale;
                         hr.width /= haarTrackingScale;
                         hr.height /= haarTrackingScale;
                         p->setHaarRect(hr);
                         haarThisFrame = true;
                         break;
                     }
                 }
                 if(!haarThisFrame){
                     p->noHaarThisFrame();
                 }
             }
             personUpdated(p, scene);
         } else {
             //brand-new blob: create and announce a person for it
             //(ownership presumably passes to the tracked-people list via
             //personEntered — confirm against CvPerson lifecycle)
             CvPerson* newPerson = new CvPerson(id, i, contourFinder);
             personEntered(newPerson, scene);
         }
     }
     
     //reset scene
     if ( bTrackOpticalFlow && bFlowTrackedOnce ){
         scene->averageMotion = flow.getAverageFlow();
     } else {
         scene->averageMotion = ofPoint(0,0);
     }
     scene->update( trackedPeople, tspsWidth, tspsHeight );
     
     // delete old blobs
     for (int i=trackedPeople->size()-1; i>=0; i--){
         Person* p = (*trackedPeople)[i];
         EventArgs args;
         args.person = p;
         args.scene  = scene;
         
         if (p == NULL){
             personWillLeave(p, scene);
             trackedPeople->erase(trackedPeople->begin() + i);
         } else if ( !rectTracker.existsCurrent(p->pid) ){ // logically equivalent to the old !(prev && cur) && !cur
             personWillLeave(p, scene);
             trackedPeople->erase(trackedPeople->begin() + i);
         }
     }
     return differencedImage.getPixelsRef();
 }