//-------------------------------------------------------------------------------- void ofxCvColorImage::convertToGrayscalePlanarImage (ofxCvGrayscaleImage& grayImage, int whichPlane){ if( !bAllocated ){ ofLogError("ofxCvColorImage") << "convertToGrayscalePlanarImage(): image not allocated"; return; } if( !grayImage.bAllocated ){ grayImage.allocate(width, height); } ofRectangle roi = getROI(); ofRectangle grayRoi = grayImage.getROI(); if( grayRoi.width == roi.width && grayRoi.height == roi.height ){ switch (whichPlane){ case 0: cvCvtPixToPlane(cvImage, grayImage.getCvImage(), NULL, NULL, NULL); grayImage.flagImageChanged(); break; case 1: cvCvtPixToPlane(cvImage, NULL, grayImage.getCvImage(), NULL, NULL); grayImage.flagImageChanged(); break; case 2: cvCvtPixToPlane(cvImage, NULL, NULL, grayImage.getCvImage(), NULL); grayImage.flagImageChanged(); break; } } else { ofLogError("ofxCvColorImage") << "convertToGrayscalePlanarImages(): image size or region of interest mismatch"; } }
//-------------------------------------------------------------------------------- void ofxCvColorImage::convertToGrayscalePlanarImages(ofxCvGrayscaleImage& red, ofxCvGrayscaleImage& green, ofxCvGrayscaleImage& blue){ if( !bAllocated ){ ofLogError("ofxCvColorImage") << "convertToGrayscalePlanarImages(): image not allocated"; return; } ofRectangle roi = getROI(); ofRectangle redRoi = red.getROI(); ofRectangle greenRoi = green.getROI(); ofRectangle blueRoi = blue.getROI(); if( !red.bAllocated ){ red.allocate(width, height); } if( !green.bAllocated ){ green.allocate(width, height); } if( !blue.bAllocated ){ blue.allocate(width, height); } if( redRoi.width == roi.width && redRoi.height == roi.height && greenRoi.width == roi.width && greenRoi.height == roi.height && blueRoi.width == roi.width && blueRoi.height == roi.height ) { cvCvtPixToPlane(cvImage, red.getCvImage(), green.getCvImage(), blue.getCvImage(), NULL); red.flagImageChanged(); green.flagImageChanged(); blue.flagImageChanged(); } else { ofLogError("ofxCvColorImage") << "convertToGrayscalePlanarImages(): image size or region of interest mismatch"; } }
//-------------------------------------------------------------------------------- void ofxCvColorImage::convertToGrayscalePlanarImage (ofxCvGrayscaleImage& grayImage, int whichPlane){ ofRectangle roi = getROI(); ofRectangle grayRoi = grayImage.getROI(); if( grayRoi.width == roi.width && grayRoi.height == roi.height ){ switch (whichPlane){ case 0: cvCvtPixToPlane(cvImage, grayImage.getCvImage(), NULL, NULL, NULL); grayImage.flagImageChanged(); break; case 1: cvCvtPixToPlane(cvImage, NULL, grayImage.getCvImage(), NULL, NULL); grayImage.flagImageChanged(); break; case 2: cvCvtPixToPlane(cvImage, NULL, NULL, grayImage.getCvImage(), NULL); grayImage.flagImageChanged(); break; } } else { ofLog(OF_LOG_ERROR, "in convertToGrayscalePlanarImages, ROI/size mismatch"); } }
//-------------------------------------------------------------------------------- void ofxCvColorImage::convertToGrayscalePlanarImages(ofxCvGrayscaleImage& red, ofxCvGrayscaleImage& green, ofxCvGrayscaleImage& blue){ ofRectangle roi = getROI(); ofRectangle redRoi = red.getROI(); ofRectangle greenRoi = green.getROI(); ofRectangle blueRoi = blue.getROI(); if( redRoi.width == roi.width && redRoi.height == roi.height && greenRoi.width == roi.width && greenRoi.height == roi.height && blueRoi.width == roi.width && blueRoi.height == roi.height ) { cvCvtPixToPlane(cvImage, red.getCvImage(), green.getCvImage(), blue.getCvImage(), NULL); red.flagImageChanged(); green.flagImageChanged(); blue.flagImageChanged(); } else { ofLog(OF_LOG_ERROR, "in convertToGrayscalePlanarImages, ROI/size mismatch"); } }
//-------------------------------------------------------------- void DepthHoleFiller::fillHolesUsingContourFinder (ofxCvGrayscaleImage &depthImage, int maxContourArea, int maxNContours ){ // Find just the holes, as geometric contours. computeBlobContours (depthImage, maxContourArea, maxNContours); // Rasterize those holes as filled polys, white on a black background. computeBlobImage(); // Re-color the blobs with graylevels interpolated from the depth image. fillBlobsWithInterpolatedData (depthImage); // Add the interpolated holes back into the depth image cvMax(depthImage.getCvImage(), ofxCv8uC1_Blobs.getCvImage(), depthImage.getCvImage()); // Flag changes to the images. ofxCv8uC1_Blobs.flagImageChanged(); depthImage.flagImageChanged(); }
//--------------------------------------------------------------
void testApp::update(){
    // Per-frame update: bounce the demo ball, pull fresh Kinect frames,
    // pre-process the depth image, extract blobs, and track a single
    // (optionally Kalman-filtered) interaction point into a ring buffer.

    /************ UPDATE BALL ***********************/
    //Update ball position
    ballPositionX += ballVelocityX;
    ballPositionY += ballVelocityY;
    // Bounce off window edges by reversing the velocity component.
    if(ballPositionX < 0 || ballPositionX > ofGetWidth()) {
        ballVelocityX *= -1;
    }
    if (ballPositionY < 0 || ballPositionY > ofGetHeight()) {
        ballVelocityY *= -1;
    }

    /************ UPDATE KINECT ***********************/
    kinect.update();

    // get color pixels
    colorImageRGB = kinect.getPixels();

    // get depth pixels
    depthOrig = kinect.getDepthPixels();

    // save original depth, and do some preprocessing
    depthProcessed = depthOrig;
    if(invert) depthProcessed.invert();
    if(mirror) {
        depthOrig.mirror(false, true);
        depthProcessed.mirror(false, true);
        colorImageRGB.mirror(false, true);
    }
    // Optional smoothing; cvSmooth needs an odd kernel size, hence preBlur*2+1.
    if(preBlur) cvSmooth(depthProcessed.getCvImage(), depthProcessed.getCvImage(), CV_BLUR , preBlur*2+1);
    // Threshold sliders are scaled by 255 — presumably normalized 0..1 GUI
    // values mapped to the 8-bit depth range; TODO confirm against the GUI setup.
    if(topThreshold) cvThreshold(depthProcessed.getCvImage(), depthProcessed.getCvImage(), topThreshold * 255, 255, CV_THRESH_TRUNC);
    if(bottomThreshold) cvThreshold(depthProcessed.getCvImage(), depthProcessed.getCvImage(), bottomThreshold * 255, 255, CV_THRESH_TOZERO);
    // Morphological cleanup; the user chooses which operation runs first.
    if(dilateBeforeErode) {
        if(dilateAmount) cvDilate(depthProcessed.getCvImage(), depthProcessed.getCvImage(), 0, dilateAmount);
        if(erodeAmount) cvErode(depthProcessed.getCvImage(), depthProcessed.getCvImage(), 0, erodeAmount);
    } else {
        if(erodeAmount) cvErode(depthProcessed.getCvImage(), depthProcessed.getCvImage(), 0, erodeAmount);
        if(dilateAmount) cvDilate(depthProcessed.getCvImage(), depthProcessed.getCvImage(), 0, dilateAmount);
    }
    depthProcessed.flagImageChanged();

    // find contours; blob area limits are fractions of the full image area
    // (min/maxBlobSize squared — presumably normalized side lengths; TODO confirm)
    depthContours.findContours(depthProcessed,
                               minBlobSize * minBlobSize * depthProcessed.getWidth() * depthProcessed.getHeight(),
                               maxBlobSize * maxBlobSize * depthProcessed.getWidth() * depthProcessed.getHeight(),
                               maxNumBlobs, findHoles, useApproximation);

    // Clear old attraction points
    // NOTE(review): clear() drops the pointers without delete — the ofPoints
    // new'd below appear to leak every frame unless they are freed elsewhere;
    // verify ownership before relying on this.
    attractPts.clear();

    // Find centroid point for each blob area and add an attraction force to it
    for (int i=0; i<depthContours.blobs.size(); i++) {
        attractPts.push_back(new ofPoint(depthContours.blobs[i].centroid));
        //printf("Blob %d: %f %f \n", i, depthContours.blobs[i].centroid.x, depthContours.blobs[i].centroid.y);
    }

    // if one blob found, find nearest point in blob area
    // static: newPoint keeps its last value on frames where no single blob is
    // found, so the lerp below keeps easing toward the last known target.
    static ofxVec3f newPoint;
    if(depthContours.blobs.size() == 1) {
        ofxCvBlob &blob = depthContours.blobs[0];

        // Restrict the search to the blob's bounding box, then locate the
        // extreme-valued pixel inside it.
        depthOrig.setROI(blob.boundingRect);

        double minValue, maxValue;
        CvPoint minLoc, maxLoc;
        cvMinMaxLoc(depthOrig.getCvImage(), &minValue, &maxValue, &minLoc, &maxLoc, NULL);

        depthOrig.resetROI();

        // maxLoc is relative to the ROI; offset back to image coordinates.
        newPoint.x = maxLoc.x + blob.boundingRect.x;
        newPoint.y = maxLoc.y + blob.boundingRect.y;
        // newPoint.z = (maxValue + offset) * depthScale; // read from depth map
        //printf("Min: %f %f Max: %f %f \n", minLoc.x, minLoc.y, maxLoc.x, maxLoc.y);

        // read directly from distance (in cm)
        // this doesn't seem to work, need to look into it
        newPoint.z = (kinect.getDistanceAt(newPoint) + depthOffset) * depthScale;

        // apply kalman filtering
        if(doKalman) {
            newPoint.x = updateKalman(0, newPoint.x);
            newPoint.y = updateKalman(1, newPoint.y);
            newPoint.z = updateKalman(2, newPoint.z);
        }
    } else {
        // No unique blob this frame: reset the per-axis Kalman filters.
        clearKalman(0);
        clearKalman(1);
        clearKalman(2);
    }

    // Advance the ring buffer head and ease the current point toward the target.
    pointHead = (pointHead + 1) % kNumPoints;
    curPoint += (newPoint - curPoint) * lerpSpeed;
    points[pointHead] = curPoint;
}