// Loads an image from disk, caching the original and a grayscale copy.
// @param filename  path to the image file; remembered in m_filename even
//                  when decoding fails, matching the original behavior.
// @return true if the image was decoded, false otherwise.
bool HausdorffImageProcessor::loadImage(string filename)
{
	m_filename = filename;
	// Flag 1 == cv::IMREAD_COLOR: always decode to a 3-channel BGR image.
	// cv::imread accepts std::string directly; no need for c_str().
	m_originalImage = cv::imread( filename, 1 );
	// empty() is the idiomatic validity check for cv::Mat — it also covers
	// zero-sized mats, which a raw data==NULL comparison does not.
	if( m_originalImage.empty() )
		return false;
	m_grayImage = convertToGray(m_originalImage);
	return true;
}
// Segments the dominant shape from _image into a clean binary mask (_shape).
// Pipeline: cluster to 2 colors -> grayscale -> threshold to a 0/1 mask with
// the shape white -> blob analysis to fill small black holes white and small
// white specks black -> finally keep only the single largest white blob.
// NOTE(review): relies on a template parameter T from the enclosing class
// (pixel element type) — the template header is outside this view.
void
Auvsi_Recognize::extractShape( void )
{
	typedef cv::Vec<T, 1> VT;	// single-channel pixel accessor for at<>()

	// Reduce input to two colors
	cv::Mat reducedColors = doClustering<T>( _image, 2 );	
	cv::Mat grayScaled, binary;

	// Make output grayscale
	grayScaled = convertToGray( reducedColors );
	//cv::cvtColor( reducedColors, grayScaled, CV_RGB2GRAY );

	// Make binary: everything above the minimum gray level becomes 1.0
	double min, max;
	cv::minMaxLoc( grayScaled, &min, &max );
	cv::threshold( grayScaled, binary, min, 1.0, cv::THRESH_BINARY );	

	// ensure that background is black, image white — the corner pixel (0,0)
	// is assumed to be background; if it thresholded white, invert.
	if( binary.at<VT>(0, 0)[0] > 0.0f )
		cv::threshold( grayScaled, binary, min, 1.0, cv::THRESH_BINARY_INV );

	// Scale the 0/1 mask to 0/255 8-bit, as the blob library expects.
	binary.convertTo( binary, CV_8U, 255.0f );

	// Fill in all black regions smaller than largest black region with white
	CBlobResult blobs;
	CBlob * currentBlob;
	// The IplImage header shares pixel data with 'binary', so FillBlob calls
	// below write directly into the cv::Mat.
	IplImage binaryIpl = binary;
	blobs = CBlobResult( &binaryIpl, NULL, 255 );	// background=255 -> extracts black blobs
	
	// Get area of biggest blob (index 0 when ordered by CBlobGetArea)
	CBlob biggestBlob;
	blobs.GetNthBlob( CBlobGetArea(), 0, biggestBlob );

	// Remove all blobs of smaller area — keep only blobs strictly smaller
	// than the largest (B_EXCLUDE drops those >= its area), then paint them white.
	blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_GREATER_OR_EQUAL, biggestBlob.Area() );

	for (int i = 0; i < blobs.GetNumBlobs(); i++ )
	{
    	currentBlob = blobs.GetBlob(i);
		currentBlob->FillBlob( &binaryIpl, cvScalar(255));
	}
	


	// Fill in all small white regions black 
	// (same trick, background=0 -> extracts white blobs; biggestBlob is
	// reassigned here to the largest WHITE blob and reused below)
	blobs = CBlobResult( &binaryIpl, NULL, 0 );
	blobs.GetNthBlob( CBlobGetArea(), 0, biggestBlob );

	blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_GREATER_OR_EQUAL, biggestBlob.Area() );

	for (int i = 0; i < blobs.GetNumBlobs(); i++ )
	{
    	currentBlob = blobs.GetBlob(i);
		currentBlob->FillBlob( &binaryIpl, cvScalar(0));
	}

	// Wipe the mask to all-black, then redraw only the largest white blob
	// (binaryIpl still aliases binary's pixels, so the fill lands in 'binary').
	binary = cv::Scalar(0);
	biggestBlob.FillBlob( &binaryIpl, cvScalar(255));

	_shape = binary;
} 
Beispiel #3
0
/*
 * Entry point for the image-filter tool.
 *
 * Usage: prog <input> <output> <command> [sigma]
 *   command 0-6  : convolve with the pre-built kernel filters[command]
 *   command 7    : convert to grayscale
 *   command 8    : invert colors
 *   command 9    : flip
 *   command 10   : pixelate (8x8 blocks)
 *   command 11   : pencil sketch (blur + invert + blend)
 *   sigma        : optional Gaussian strength, accepted only for commands
 *                  0 and 11 (default 3)
 *
 * Returns 0 on success; exits with status 1 on any argument error.
 */
int main(int argc,char *argv[])
{
  // Handle user input: exactly 3 or 4 real arguments.
  if(argc<4 || argc>5)
  {
    printf("Incorrect number of arguments\n");
    printf("Number of arguments: %d\n",argc);
    exit(1);
  }

  const char *inputFilename=argv[1];
  printf("Inputfile: %s\n",inputFilename);
  const char *outputFilename=argv[2];
  char garbage[2];    // %1s sink: sscanf returns 2 if trailing junk follows the number
  int command;
  double sigma=3;     // default blur strength when no sigma argument is given

  // Command must be a bare integer in [0, 11].
  if(1!=sscanf(argv[3],"%d%1s",&command,garbage) || command<0 || command>11)
  {
    printf("Incorrect command\n");
    exit(1);
  }

  // Only commands 0 and 11 accept a sigma argument.
  if(command>0 && command<11 && argc==5)
  {
    printf("Incorrect number of arguments, exclude the sigma value");
    exit(1);
  }

  // If sigma was supplied, it must be a bare non-negative number.
  if(((command==0 || command==11) && argc==5 && 1!=sscanf(argv[4],"%lf%1s",&sigma,garbage)) || sigma<0)
  {
    printf("Incorrect sigma value\n");
    exit(1);
  }

  Filter *filters=initializeFilters(sigma);
  Image *inputImage=decode(inputFilename);
  printf("Width: %d, height: %d\n",inputImage->width,inputImage->height);
  Image *outputImage=generateOutput(inputImage);

  uint8_t *inRed=inputImage->redChannel;
  uint8_t *inBlue=inputImage->blueChannel;
  uint8_t *inGreen=inputImage->greenChannel;
  uint8_t *inAlpha=inputImage->alphaChannel;
  uint8_t *outRed=outputImage->redChannel;
  uint8_t *outBlue=outputImage->blueChannel;
  uint8_t *outGreen=outputImage->greenChannel;
  uint8_t *outAlpha=outputImage->alphaChannel;
  int height=inputImage->height;
  int width=inputImage->width;

  if(command>=0 && command<=6)
  {
    // Commands 0-6 differ only in which pre-built kernel they apply, so one
    // call indexed by the command replaces seven identical switch cases.
    convolveImage(inRed,inBlue,inGreen,inAlpha,outRed,outBlue,outGreen,
                  outAlpha,filters[command].filter,filters[command].radius,
                  width,height);
  }
  else if(command==7)
  {
    convertToGray(inRed,inBlue,inGreen,inAlpha,outRed,outBlue,outGreen,
                  outAlpha,gMonoMult,width,height);
  }
  else if(command==8)
  {
    invertImage(inRed,inBlue,inGreen,inAlpha,outRed,outBlue,outGreen,
                outAlpha,width,height);
  }
  else if(command==9)
  {
    flipImage(inRed,inBlue,inGreen,inAlpha,outRed,outBlue,outGreen,
              outAlpha,width,height);
  }
  else if(command==10)
  {
    pixelate(inRed,inBlue,inGreen,inAlpha,outRed,outBlue,outGreen,
             outAlpha,8,8,width,height);
  }
  else if(command==11)
  {
    // Pencil sketch needs two scratch images: an inverted copy and a blurred copy.
    Image *invImage=generateOutput(inputImage);
    Image *blurImage=generateOutput(inputImage);
    pencilSketch(inRed,inBlue,inGreen,inAlpha,invImage->redChannel,
                 invImage->blueChannel,invImage->greenChannel,
                 invImage->alphaChannel,blurImage->redChannel,
                 blurImage->blueChannel,blurImage->greenChannel,
                 blurImage->alphaChannel,outRed,outBlue,outGreen,
                 outAlpha,filters[0].filter,filters[0].radius,width,height,
                 gMonoMult);
    freeImage(invImage);
    freeImage(blurImage);
  }
  else
  {
    // Unreachable: command was validated to [0, 11] above.
    exit(1);
  }

  // command is always <= 11 here, so the old `if(command!=12)` guard was
  // dead — encode unconditionally.
  encode(outputFilename,outputImage);

  free((double*)filters[0].filter);
  free(filters);
  freeImage(inputImage);
  freeImage(outputImage);
  return 0;
}
/*!
 * Grabs one camera frame and one depth frame from the Kinect (via OpenCV's
 * OpenNI capture when built WITH_OPENNI and lib==opencv, otherwise via the
 * freenect-style `device`), converts the depth frame according to depthMode,
 * the camera frame according to cameraMode, writes both to the output
 * streams, and raises the new-data events.
 *
 * Always returns true; any exception is caught and only logged so the
 * processing loop keeps running.
 */
bool CameraNUI::onStep() {
    try {
        Mat cameraFrameCopy;
        Mat depthFrameCopy;
        Mat frame;
        LOG(LTRACE) << "CameraNUI::step\n";
#ifdef WITH_OPENNI
        if(lib == opencv) {
            if( !capture.grab() ) {
                LOG(LERROR) << "CameraKinect::onStep: Can't grab images\n";
            } else {
                if(depthMode == normalized) {
                    // Scale raw depth (mm) down into 8 bits for display.
                    if( capture.retrieve( depthFrame, CV_CAP_OPENNI_DEPTH_MAP ) ) {
                        depthFrame.convertTo( show, CV_8UC1, 0.05 );
                        show.copyTo(depthFrameCopy);
                    }
                } else if(depthMode == disparityMap) {
                    // FIX: previously retrieved CV_CAP_OPENNI_VALID_DEPTH_MASK
                    // (a copy/paste of the `valid` branch below); disparityMap
                    // mode should retrieve the disparity map itself, mirroring
                    // convertToDisparityMap() in the non-OpenNI path.
                    capture.retrieve( depthFrame, CV_CAP_OPENNI_DISPARITY_MAP );
                    depthFrame.copyTo(depthFrameCopy);
                } else if(depthMode == dM32f) {
                    capture.retrieve( depthFrame, CV_CAP_OPENNI_DISPARITY_MAP_32F );
                    depthFrame.copyTo(depthFrameCopy);
                } else if(depthMode == pointCloud) {
                    capture.retrieve( depthFrame, CV_CAP_OPENNI_POINT_CLOUD_MAP );
                    depthFrame.copyTo(depthFrameCopy);
                } else if(depthMode == valid) {
                    capture.retrieve( depthFrame, CV_CAP_OPENNI_VALID_DEPTH_MASK );
                    depthFrame.copyTo(depthFrameCopy);
                } else {
                    // Unknown/raw mode: pass the depth frame through untouched.
                    depthFrame.copyTo(depthFrameCopy);
                }

                if(cameraMode == gray) {
                    capture.retrieve( cameraFrame, CV_CAP_OPENNI_GRAY_IMAGE );
                    cameraFrame.copyTo(cameraFrameCopy);
                } else {
                    capture.retrieve( cameraFrame, CV_CAP_OPENNI_BGR_IMAGE );
                    cameraFrame.copyTo(cameraFrameCopy);
                }
            }
        } else {
#endif
            device->getVideo(cameraFrame);
            device->getDepth(depthFrame);
            // Convert the raw depth map per depthMode (mirrors the OpenNI
            // branch above, using local conversion helpers instead of
            // capture.retrieve flags).
            if(depthMode == normalized) {
                depthFrame.convertTo( show, CV_8UC1, SCALE_FACTOR );
                show.copyTo(depthFrameCopy);
            } else if(depthMode == dM32f) {
                convertToDisparityMap32f(depthFrame,depthFrameCopy);
            } else if(depthMode == disparityMap) {
                convertToDisparityMap(depthFrame,depthFrameCopy);
            } else if(depthMode == pointCloud) {
                convertToPointCloudMap(depthFrame,depthFrameCopy);
            } else if(depthMode == valid) {
                convertToValidPixelsMap(depthFrame,depthFrameCopy);
            } else {
                depthFrame.copyTo(depthFrameCopy);
            }

            if(cameraMode == gray) {
                convertToGray( cameraFrame, cameraFrameCopy );
            } else {
                cameraFrame.copyTo(cameraFrameCopy);
            }
#ifdef WITH_OPENNI
        }
#endif
        // Write camera frame and depth map to the output data streams.
        outImg.write(cameraFrameCopy);
        outDepthMap.write(depthFrameCopy);
        // Raise events so downstream components know new data is available.
        newImage->raise();
        newDepthMap->raise();
    } catch (...) {
        // Deliberate best-effort: log and keep the component running.
        LOG(LERROR) << "CameraNUI::onStep failed\n";
    }
    return true;
}