void kinectCapture::update() {
    
    if(bTwoKinects && !bKinectsStarted) {
        kinect1.update();
        kinect2.update();
        if (kinect1.isFrameNew() && kinect2.isFrameNew()) {
            bKinectsStarted = true;
            return;
        }
        else {
            return;
        }
    }
    
    kinect1.update();
    bKin1Refreshed = false;
    
    // IF KINECT ONE FRAME IS NEW ---------------------------------
    
    if (kinect1.isFrameNew()) {
        
        bKin1Refreshed = true;
        
        // DO: UPDATE ALL CV STUFF
        
        if (bMovementDetection) {
            cvGrayKin1Prev = cvGrayKin1;
        }
    
        cvGrayKin1.setFromPixels(kinect1.getDepthPixels(), kinect1.width, kinect1.height);
        
        if (bMovementDetection) {
            kin1BlobTracker.update(cvGrayKin1, cvGrayKin1Prev, iNearThreshold, iFarThreshold,iMinBlobSize, iMaxBlobSize, iMaxNumBlobs, 20, false, true);
        }
        else {
            kin1BlobTracker.update(cvGrayKin1, iNearThreshold, iFarThreshold, iMinBlobSize, iMaxBlobSize, iMaxNumBlobs, 20, false, true);
        }
        
        kin1FoundBlobs.clear();
        
        // IF THERE ARE BLOBS -------------------------------------
        
        if (kin1BlobTracker.size() > 0) {
         
         // DO: UPDATE ALL BLOB STUFF
         
             for (int i = 0; i < kin1BlobTracker.trackedBlobs.size(); i++) {
                 if(kin1BlobTracker.trackedBlobs[i].pts.size() > 0) {
             
                     kin1FoundBlobs.push_back(ofxBlob());
             
                     kin1FoundBlobs[i].id = -1 * kin1BlobTracker.trackedBlobs[i].id;
                     
                     kin1FoundBlobs[i].angle = kin1BlobTracker.trackedBlobs[i].angle;
                     kin1FoundBlobs[i].maccel = kin1BlobTracker.trackedBlobs[i].maccel;
             
                     kin1FoundBlobs[i].centroid.x = setInRangeWidth(kin1BlobTracker.trackedBlobs[i].centroid.x, bTwoKinects, false);
                     kin1FoundBlobs[i].centroid.y = kin1BlobTracker.trackedBlobs[i].centroid.y;
                     kin1FoundBlobs[i].boundingRect.x = setInRangeWidth(kin1BlobTracker.trackedBlobs[i].boundingRect.x, bTwoKinects, false);
                     kin1FoundBlobs[i].boundingRect.y = kin1BlobTracker.trackedBlobs[i].boundingRect.y;
                     kin1FoundBlobs[i].boundingRect.width = setInRangeWidth(kin1BlobTracker.trackedBlobs[i].boundingRect.width, bTwoKinects, false);
                     kin1FoundBlobs[i].boundingRect.height =kin1BlobTracker.trackedBlobs[i].boundingRect.height;
                    
                     kin1FoundBlobs[i].angleBoundingRect.x = setInRangeWidth(kin1BlobTracker.trackedBlobs[i].angleBoundingRect.x, bTwoKinects, false);
                     kin1FoundBlobs[i].angleBoundingRect.y = kin1BlobTracker.trackedBlobs[i].angleBoundingRect.y;
                     kin1FoundBlobs[i].angleBoundingRect.width = setInRangeWidth(kin1BlobTracker.trackedBlobs[i].angleBoundingRect.width, bTwoKinects, false);
                     kin1FoundBlobs[i].angleBoundingRect.height =kin1BlobTracker.trackedBlobs[i].angleBoundingRect.height;
                     
                     for (int j = 0; j < kin1BlobTracker.trackedBlobs[i].pts.size(); j++) {
                         kin1FoundBlobs[i].pts.push_back(ofPoint(setInRangeWidth(kin1BlobTracker.trackedBlobs[i].pts[j].x, bTwoKinects, false),kin1BlobTracker.trackedBlobs[i].pts[j].y));
                     }
                 }
             }
         }
        
        // END IF THERE ARE BLOBS ---------------------------------
        
    }
    
    // ENDIF KINECT ONE FRAME IS NEW

    // IF USING TWO KINECTS ---------------------------------------
    
    if (bTwoKinects) {
        
        kinect2.update();
        bKin2Refreshed = false;
        
        // IF KINECT TWO FRAME IS NEW -----------------------------
        
        if (kinect2.isFrameNew()) {
            
            bKin2Refreshed = true;
            
            // DO: UPDATE ALL CV STUFF
            
            if(bMovementDetection) {
                cvGrayKin2Prev = cvGrayKin2;
            }
            cvGrayKin2.setFromPixels(kinect2.getDepthPixels(), kinect2.width, kinect2.height);
            
            if(bMovementDetection) {
                kin2BlobTracker.update(cvGrayKin2, cvGrayKin2Prev,iNearThreshold, iFarThreshold, iMinBlobSize, iMaxBlobSize, iMaxNumBlobs, 20, false, true);
            }
            else {
                kin2BlobTracker.update(cvGrayKin2, iNearThreshold, iFarThreshold, iMinBlobSize, iMaxBlobSize, iMaxNumBlobs, 20, false, true);
            }
                        
            
            kin2FoundBlobs.clear();
            
            // IF THERE ARE BLOBS ---------------------------------
            
            if (kin2BlobTracker.size() > 0) {
                
                // DO: UPDATE ALL BLOB STUFF
                
                for (int i = 0; i < kin2BlobTracker.trackedBlobs.size(); i++) {
                    if(kin2BlobTracker.trackedBlobs[i].pts.size() > 0) {
                        
                        kin2FoundBlobs.push_back(ofxBlob());
                        
                        kin2FoundBlobs[i].id = kin2BlobTracker.trackedBlobs[i].id;
                        
                        kin2FoundBlobs[i].angle = kin2BlobTracker.trackedBlobs[i].angle;
                        kin2FoundBlobs[i].maccel = kin2BlobTracker.trackedBlobs[i].maccel;
                        
                        kin2FoundBlobs[i].centroid.x = setInRangeWidth(kin2BlobTracker.trackedBlobs[i].centroid.x, true, true);
                        kin2FoundBlobs[i].centroid.y = kin2BlobTracker.trackedBlobs[i].centroid.y;
                        kin2FoundBlobs[i].boundingRect.x = setInRangeWidth(kin2BlobTracker.trackedBlobs[i].boundingRect.x, true, true);
                        kin2FoundBlobs[i].boundingRect.y = kin2BlobTracker.trackedBlobs[i].boundingRect.y;
                        kin2FoundBlobs[i].boundingRect.width = setInRangeWidth(kin2BlobTracker.trackedBlobs[i].boundingRect.width, true, false);
                        kin2FoundBlobs[i].boundingRect.height =kin2BlobTracker.trackedBlobs[i].boundingRect.height;
                       
                        kin2FoundBlobs[i].angleBoundingRect.x = setInRangeWidth(kin2BlobTracker.trackedBlobs[i].angleBoundingRect.x, true, true);
                        kin2FoundBlobs[i].angleBoundingRect.y = kin2BlobTracker.trackedBlobs[i].angleBoundingRect.y;
                        kin2FoundBlobs[i].angleBoundingRect.width = setInRangeWidth(kin2BlobTracker.trackedBlobs[i].angleBoundingRect.width, true, false);
                        kin2FoundBlobs[i].angleBoundingRect.height =kin2BlobTracker.trackedBlobs[i].angleBoundingRect.height;
                        
                        
                        for (int j = 0; j < kin2BlobTracker.trackedBlobs[i].pts.size(); j++) {
                            kin2FoundBlobs[i].pts.push_back(ofPoint(setInRangeWidth(kin2BlobTracker.trackedBlobs[i].pts[j].x, true, true), kin2BlobTracker.trackedBlobs[i].pts[j].y));
                        }
                    }
                }
            }
            
            // ENDIF THERE ARE BLOBS ------------------------------
                   
        }
        
        // ENDIF KINECT TWO FRAME IS NEW --------------------------
        
        // IF EITHER KINECT FRAME IS NEW --------------------------
        
        if (bKin1Refreshed || bKin2Refreshed) {
            
            // DO: ASSIGN NEW BLOBS TO <FOUND BLOBS>
            
            foundBlobs.clear();
            foundBlobs = kin1FoundBlobs;
            foundBlobs.insert(foundBlobs.end(), kin2FoundBlobs.begin(), kin2FoundBlobs.end());
            
            activeBlobsIds.clear();
            foundBlobsMap.clear();
            
            for (int i = 0; i < kin1FoundBlobs.size(); i++) {
                foundBlobsMap[kin1FoundBlobs[i].id] = kin1FoundBlobs[i];
                activeBlobsIds.push_back(kin1FoundBlobs[i].id);
            }
            for (int i = 0; i < kin2FoundBlobs.size(); i++) {
                foundBlobsMap[kin2FoundBlobs[i].id] = kin2FoundBlobs[i];
                activeBlobsIds.push_back(kin2FoundBlobs[i].id);
            }
            
            
            // DO: ASSIGN NEW CLOUD TO <POINT CLOUD>
            
            pointCloud.clear();
            pointCloudBuffer[iCurBufferIdx].clear();
                     
            for (int y = 0; y < KIN_H; y++) {
                for (int x = KIN_OUTPUT_W; x > 0; x--) {
                    if (x <= KIN2_INTERS_W) {
                        pointCloudBuffer[iCurBufferIdx].push_back(ofPoint(normWidth(KIN_OUTPUT_W - x, true), normHeight(y), normDepth((int)kinect1.getDistanceAt(x, y))));
                    }
                    else if (x > KIN2_INTERS_W && x <= KIN_W) {
                        int minDist = kinect1.getDistanceAt(x, y) < kinect2.getDistanceAt(x - KIN2_INTERS_W, y) ? kinect1.getDistanceAt(x, y) : kinect2.getDistanceAt(x - KIN2_INTERS_W, y);
                        pointCloudBuffer[iCurBufferIdx].push_back(ofPoint(normWidth(KIN_OUTPUT_W - x, true), normHeight(y), normDepth(minDist)));
                    }
                    else if (x > KIN2_INTERS_W) {
                        pointCloudBuffer[iCurBufferIdx].push_back(ofPoint(normWidth(KIN_OUTPUT_W - x, true), normHeight(y), normDepth((int)kinect2.getDistanceAt(x - KIN2_INTERS_W, y))));
                    }
                    pointCloud.push_back(ofPoint(normWidth(KIN_OUTPUT_W - x, true), normHeight(y), avgBuffer(x, y)));
                }
            }
            
            if (iCurBufferIdx >= iBufferSize-1) {
                iCurBufferIdx = 0;
            }
            else {
                iCurBufferIdx++;
            }
            
        }
        
        // ENDIF EITHER KINECT FRAME IS NEW -----------------------
    
    }
    
    // ELSE (NOT USING TWO KINECTS) -------------------------------
    
    else {
        
        // IF KINECT ONE FRAME IS NEW
        
        if (bKin1Refreshed) {
            
            // DO: ASSIGN NEW BLOBS TO <FOUND BLOBS>
            
            foundBlobs.clear();
            foundBlobs = kin1FoundBlobs;
            
            foundBlobsMap.clear();
            activeBlobsIds.clear();
            
            for (int i = 0; i < kin1FoundBlobs.size(); i++) {
                foundBlobsMap[kin1FoundBlobs[i].id] = kin1FoundBlobs[i];
                activeBlobsIds.push_back(kin1FoundBlobs[i].id);
            }
            
            // DO: ASSIGN NEW CLOUD TO <POINT CLOUD>
            
            pointCloud.clear();
            pointCloudBuffer[iCurBufferIdx].clear();
            
            for (int y = 0; y < KIN_H; y++) {
                for (int x = KIN_W; x > 0; x--) {
                    pointCloudBuffer[iCurBufferIdx].push_back(ofPoint(normWidth(KIN_W - x), normHeight(y), normDepth((int)kinect1.getDistanceAt(x,y))));
                    pointCloud.push_back(ofPoint(normWidth(KIN_W - x, true), normHeight(y), avgBuffer(x, y)));
                }
            }
            
            if (iCurBufferIdx >= iBufferSize-1) {
                iCurBufferIdx = 0;
            }
            else {
                iCurBufferIdx++;
            }
            
        }
        
    }
    
    // ENDIF USING TWO KINECTS ------------------------------------
}
//--------------------------------------------------------------------------------
// Finds contours ("blobs") in a grayscale image via the OpenCV 1.x C API,
// filters them by area, fills in blob geometry (bounding boxes, centroid,
// contour points) and — for large blobs — runs a convex-hull based finger
// detector. Returns the number of blobs found (also stored in nBlobs).
//
//   input              image to search; detection runs on an internal copy
//                      because cvFindContours clobbers its input
//   minArea/maxArea    exclusive area band a contour must fall inside
//   nConsidered        appears unused in this implementation
//   hullPress          approximation accuracy passed to cvApproxPoly
//   bFindHoles         if true, interior contours (holes) are returned too
//   bUseApproximation  use simple chain approximation instead of all pixels
int ofxContourFinder::findContours(	ofxCvGrayscaleImage&  input,
									int minArea,
									int maxArea,
									int nConsidered,
									double hullPress,	
									bool bFindHoles,
									bool bUseApproximation) {
	// get width/height disregarding ROI
    IplImage* ipltemp = input.getCvImage();
    width = ipltemp->width;
    height = ipltemp->height;
	reset();

	// cvFindContours clobbers the image it detects contours on, so copy the
	// input first. The copy is (re)allocated only when missing or the wrong
	// size; calling this with many different image sizes reallocates every
	// time, so prefer one ofxContourFinder per image size.
	if( inputCopy.width == 0 ) {
		inputCopy.allocate( input.width, input.height );
		inputCopy = input;
	} else if( inputCopy.width == input.width && inputCopy.height == input.height ) {
		inputCopy = input;
	} else {
		// allocated, but to the wrong size: reallocate to match
		inputCopy.clear();
		inputCopy.allocate( input.width, input.height );
		inputCopy = input;
	}

	CvSeq* contour_list = NULL;
	contour_storage = cvCreateMemStorage( 1000 );
	storage	= cvCreateMemStorage( 1000 );

	CvContourRetrievalMode  retrieve_mode
        = (bFindHoles) ? CV_RETR_LIST : CV_RETR_EXTERNAL;
	cvFindContours( inputCopy.getCvImage(), contour_storage, &contour_list,
                    sizeof(CvContour), retrieve_mode, bUseApproximation ? CV_CHAIN_APPROX_SIMPLE : CV_CHAIN_APPROX_NONE );
	
	CvSeq* contour_ptr = contour_list;

	nCvSeqsFound = 0;

	// walk the linked list of contours returned by cvFindContours
	while( contour_ptr != NULL )  {
		
        // FIX: compute the signed area exactly once. The old code called
        // cvContourArea twice and shadowed `area` with a second local of
        // the same name inside the if-block.
        float signedArea = cvContourArea( contour_ptr, CV_WHOLE_SEQ );
        float area = fabs( signedArea );
        if( (area > minArea) && (area < maxArea) ) {
            ofxBlob blob = ofxBlob();
            cvMoments( contour_ptr, myMoments );
            
            // axis-aligned bounding box, normalized to the image size.
            // FIX: explicit (float) casts, matching the casts used for the
            // contour points below — without them an integral width/height
            // member makes these int/int divisions truncate toward 0.
            CvRect rect	= cvBoundingRect( contour_ptr, 0 );
            blob.boundingRect.x      = (float)rect.x/width;
            blob.boundingRect.y      = (float)rect.y/height;
            blob.boundingRect.width  = (float)rect.width/width;
            blob.boundingRect.height = (float)rect.height/height;
            
            // rotated (minimum-area) bounding rectangle, normalized.
            // NOTE(review): width/height are taken from box.size.height and
            // box.size.width respectively — looks swapped, but may be
            // deliberate given CvBox2D's angle convention; confirm before
            // changing.
            CvBox2D box = cvMinAreaRect2(contour_ptr);
            blob.angleBoundingRect.x	  = box.center.x/width;
            blob.angleBoundingRect.y	  = box.center.y/height;
            blob.angleBoundingRect.width  = box.size.height/width;
            blob.angleBoundingRect.height = box.size.width/height;
            blob.angle = box.angle;
            
            // assign other parameters; a negative signed area marks a hole
            blob.area                = area;
            blob.hole                = signedArea < 0 ? true : false;
            blob.length 			 = cvArcLength(contour_ptr);
            
            // Centroid from moments, kept in float precision: casting to
            // int would snap centroids between pixels and make tracking
            // jittery. NOTE(review): divides by m00 — a zero-area contour
            // would yield inf/NaN here (normally excluded by minArea > 0).
            blob.centroid.x			 = (myMoments->m10 / myMoments->m00) / width;
            blob.centroid.y 		 = (myMoments->m01 / myMoments->m00) / height;
            blob.lastCentroid.x 	 = 0;
            blob.lastCentroid.y 	 = 0;
            
            if (blob.nFingers != 0){
                blob.nFingers = 0;
                blob.fingers.clear();
            }
            
            // copy (at most TOUCH_MAX_CONTOUR_LENGTH) normalized contour points
            CvPoint           pt;
            CvSeqReader       reader;
            cvStartReadSeq( contour_ptr, &reader, 0 );
            
            for( int j=0; j < min(TOUCH_MAX_CONTOUR_LENGTH, contour_ptr->total); j++ ) {
                CV_READ_SEQ_ELEM( pt, reader );
                blob.pts.push_back( ofPoint((float)pt.x / width, (float)pt.y / height) );
            }
            blob.nPts = blob.pts.size();
            
            // Finger detection, only for blobs big enough to be a hand:
            // approximate the contour, take its convex hull, and treat any
            // hull corner with a sharp interior angle (< 1 rad) as a finger.
            if (area > 5000){
                CvSeq*  contourAprox = cvApproxPoly(contour_ptr, sizeof(CvContour), storage, CV_POLY_APPROX_DP, hullPress, 1 );
                int count = contourAprox->total; // number of points in contour
                
                CvPoint* PointArray = (CvPoint*)malloc( count*sizeof(CvPoint) ); // contour point set
                int*     hull       = (int*)malloc(sizeof(int)*count);           // hull vertex indices
                int      hullsize;
                
                cvCvtSeqToArray(contourAprox, PointArray, CV_WHOLE_SEQ);
                cvConvexHull( PointArray, count, NULL, CV_COUNTER_CLOCKWISE, hull, &hullsize);
                
                // FIX: removed an upper/lower/cutoff computation that was
                // never read afterwards (dead code; its `upper = 1`
                // min-initialization was broken anyway).
                
                // interior angle at each hull corner
                for (int j=0; j < hullsize; j++) {
                    int idx = hull[j];                        // corner index
                    int pdx = idx == 0 ? count - 1 : idx - 1; // predecessor of idx
                    int sdx = idx == count - 1 ? 0 : idx + 1; // successor of idx
                    
                    cv::Point v1 = cv::Point(PointArray[sdx].x - PointArray[idx].x, PointArray[sdx].y - PointArray[idx].y);
                    cv::Point v2 = cv::Point(PointArray[pdx].x - PointArray[idx].x, PointArray[pdx].y - PointArray[idx].y);
                    
                    float angle = acos( (v1.x*v2.x + v1.y*v2.y) / (norm(v1) * norm(v2)) );
                    
                    // sharp corner: finger candidate
                    if (angle < 1 ){
                        ofPoint posibleFinger = ofPoint((float)PointArray[idx].x / width, 
                                                        (float)PointArray[idx].y / height);
                        blob.nFingers++;
                        blob.fingers.push_back( posibleFinger );
                    }
                }
                
                if ( blob.nFingers > 0 ){
                    // probably a hand: estimate the palm as the average of
                    // the fingertips, smoothed against the previous estimate
                    ofVec2f fingersAverage;
                    for (int j = 0; j < blob.fingers.size(); j++){
                        fingersAverage += blob.fingers[j];
                    }
                    
                    fingersAverage /= blob.fingers.size();
                    
                    if (blob.gotFingers){
                        blob.palm = (blob.palm + fingersAverage)*0.5;
                    } else {
                        blob.palm = fingersAverage;
                        blob.gotFingers = true;   // remember across frames
                    }
                }
                
                // Free memory.
                free(PointArray);
                free(hull);
            }
            
            blobs.push_back(blob);
        }
        contour_ptr = contour_ptr->h_next;
    }
    
	nBlobs = blobs.size();
	
	// Release the CvMemStorage inside this function: the contour sequences
	// it owns must not outlive this call (doing it later leaked memory).
	if( contour_storage != NULL )
		cvReleaseMemStorage(&contour_storage);
	
	if( storage != NULL )
		cvReleaseMemStorage(&storage);
    
    // FIX: removed `free(contour_ptr)` — contour_ptr is always NULL when
    // the loop exits, and CvSeq memory belongs to contour_storage (released
    // above), never to malloc/free; freeing it would be undefined behavior.

	return nBlobs;
}