Example #1
cv::Mat TrackFace::cartoonifyImageColor(cv::Mat srcColor)
{
    cv::Mat smallImg;

    cv::resize(srcColor, smallImg, Size(srcColor.cols/2, srcColor.rows/2), 0, 0, INTER_LINEAR);

    cv::Mat tmp=Mat(Size(srcColor.cols/2, srcColor.rows/2), CV_8UC3);

    // Apply several weak bilateral passes instead of one strong pass: this is
    // cheaper and flattens colors for a cartoon look. bilateralFilter cannot
    // run in place, so the two buffers are used in ping-pong fashion.
    int repetition=7;

    for (int i=0;i<repetition;i++)
    {
        int ksize=9;
        double sigmaColor=9;
        double sigmaSpace=7;

        bilateralFilter(smallImg, tmp, ksize, sigmaColor, sigmaSpace);
        bilateralFilter(tmp, smallImg, ksize, sigmaColor, sigmaSpace);
    }

    cv::Mat bigImg;
    cv::resize(smallImg, bigImg, Size(srcColor.cols,srcColor.rows), 0, 0, INTER_LINEAR);

    cv::Mat dst;

    // Copy the smoothed color image through the sketch mask returned by
    // cartoonifyImageSketch(); only pixels where the mask is non-zero are copied.
    bigImg.copyTo(dst, cartoonifyImageSketch(srcColor));

    return dst;
}
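The half-resolution, repeated-filter idiom above is a common speed trick: bilateral filtering is expensive, so the image is shrunk, filtered several times with small kernels, and scaled back up. A minimal self-contained sketch of the same idea (hypothetical file names; filter parameters copied from the method above):

#include <opencv2/opencv.hpp>

int main() {
    cv::Mat src = cv::imread("input.jpg");      // hypothetical input file
    if (src.empty()) return 1;
    cv::Mat halfImg, tmp;
    cv::resize(src, halfImg, cv::Size(src.cols/2, src.rows/2), 0, 0, cv::INTER_LINEAR);
    for (int i = 0; i < 7; i++) {
        cv::bilateralFilter(halfImg, tmp, 9, 9, 7);  // ping-pong between buffers:
        cv::bilateralFilter(tmp, halfImg, 9, 9, 7);  // bilateralFilter cannot run in place
    }
    cv::Mat dst;
    cv::resize(halfImg, dst, src.size(), 0, 0, cv::INTER_LINEAR);
    cv::imwrite("cartoon_base.jpg", dst);
    return 0;
}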
Example #2
/******************************************************************************
 * Input argument(s) : QImage *inputImage - Input image for the filter
 *                     const int &radius - Radius of structuring element
 *                     const bool &roiFlag - Flag to enable region of interest
 *                     QRect *roiRect - Region of interest rectangle
 * Return type       : QImage* - Output image after bilateral filtering
 * Functionality     : Function to apply bilateral filter on the given
 *                     input image and returns the processed image
 ******************************************************************************/
QImage *ImageSmootheningFilter::applyBilateralFilter(QImage *inputImage, const int &radius, const bool &roiFlag, QRect *roiRect) const
{
    cv::Mat inputMat =  qtOpenCVBridge->QImage2Mat(inputImage);
    cv::Mat outputMat = inputMat.clone();
    if(roiFlag){
        cv::Rect regionOfInterest(roiRect->x(), roiRect->y(), roiRect->width(), roiRect->height());
        cv::Mat croppedInputMat(inputMat, regionOfInterest);
        cv::Mat croppedOutputMat(outputMat, regionOfInterest);
        bilateralFilter(croppedInputMat, croppedOutputMat, radius, radius*2.0, radius/2.0);
    }
    else{
        bilateralFilter(inputMat, outputMat, radius, radius*2.0, radius/2.0);
    }
    return qtOpenCVBridge->Mat2QImage(outputMat);
}
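A hypothetical call site for the method above, assuming an ImageSmootheningFilter instance named filter, a loaded QImage img, and that the caller takes ownership of the returned image:

QRect roi(10, 10, 200, 150);                                     // smooth only this region
QImage *out = filter.applyBilateralFilter(&img, 9, true, &roi);  // radius 9 -> sigmaColor 18, sigmaSpace 4.5
// ... use *out ...
delete out;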
Example #3
/*
src:     input image
method:  name of the noise reduction method to perform
         "average"   ==> moving average
         "median"    ==> median filter
         "adaptive"  ==> edge-preserving average filter
         "bilateral" ==> bilateral filter
kSize:   (spatial) kernel size
param:   if method == "adaptive": threshold; if method == "bilateral": standard
         deviation of the radiometric kernel; can be ignored otherwise (default value = 0)
return:  output image
*/
Mat Dip2::noiseReduction(Mat& src, string method, int kSize, double param){

   // apply moving average filter
   if (method.compare("average") == 0){
      return averageFilter(src, kSize);
   }
   // apply median filter
   if (method.compare("median") == 0){
      return medianFilter(src, kSize);
   }
   // apply adaptive average filter
   if (method.compare("adaptive") == 0){
      return adaptiveFilter(src, kSize, param);
   }
   // apply bilateral filter
   if (method.compare("bilateral") == 0){
      return bilateralFilter(src, kSize, param);
   }

   // if none of above, throw warning and return copy of original
   cout << "WARNING: Unknown filtering method! Returning original" << endl;
   cout << "Press enter to continue"  << endl;
   cin.get();
   return src.clone();

}
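A hedged usage sketch for this dispatcher, assuming a Dip2 instance and a loaded cv::Mat named src:

Dip2 dip;
cv::Mat denoised = dip.noiseReduction(src, "bilateral", 9, 30.0); // kSize 9, radiometric sigma 30
cv::Mat averaged = dip.noiseReduction(src, "average", 5, 0);      // param is ignored for "average"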
Example #4
Mat preProcess(Mat &img){

    Mat res = Mat(img.rows, img.cols, CV_8UC1);

    blur( img, res, Size( 3, 3 ), Point(-1,-1) );
    Mat aux = res.clone();
    bilateralFilter ( aux, res, 5, 5*2, 5/2 );   // note: 5/2 is integer division, so sigmaSpace = 2
    aux = res.clone();
    // Unsharp mask: blend the image with a negative-weighted blurred copy to sharpen edges.
    cv::GaussianBlur(aux, res, cv::Size(0, 0), 3);
    cv::addWeighted(aux, 1.5, res, -0.5, 0, res);

    //Wiener filter
    cvWiener2ADP(res, res, 5, 5);

    //Binarization and thinning
    threshold(res, res, mediana(res), 255, THRESH_BINARY_INV);

    //Skeletonization
    thinning(res);

    /*namedWindow("Preprocess", CV_WINDOW_AUTOSIZE);
    imshow("Preprocess", res);
    waitKey(0);*/
    return res;
}
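The GaussianBlur/addWeighted pair in the middle of preProcess() is an unsharp mask. Isolated, with the same weights (img and sharpened are hypothetical names):

cv::Mat blurred, sharpened;
cv::GaussianBlur(img, blurred, cv::Size(0, 0), 3);        // Size(0,0): kernel size derived from sigma = 3
cv::addWeighted(img, 1.5, blurred, -0.5, 0, sharpened);   // sharpened = 1.5*img - 0.5*blurred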
Example #5
    void process(InputArray _src, OutputArray _dst)
    {
        Mat src = _src.getMat();
        CV_Assert(!src.empty());
        _dst.create(src.size(), CV_32FC3);
        Mat img = _dst.getMat();
        Ptr<Tonemap> linear = createTonemap(1.0f);
        linear->process(src, img);

        Mat gray_img;
        cvtColor(img, gray_img, COLOR_RGB2GRAY);
        Mat log_img;
        log(gray_img, log_img);
        Mat map_img;
        bilateralFilter(log_img, map_img, -1, sigma_color, sigma_space); // d = -1: diameter derived from sigma_space

        double min, max;
        minMaxLoc(map_img, &min, &max);
        float scale = contrast / static_cast<float>(max - min);
        exp(map_img * (scale - 1.0f) + log_img, map_img);
        log_img.release();

        mapLuminance(img, img, gray_img, map_img, saturation);
        pow(img, 1.0f / gamma, img);
    }
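This process() method follows the Durand & Dorsey tonemapper that OpenCV also exposes through a factory; a hedged sketch using the built-in class (cv::createTonemapDurand lives in the photo module in OpenCV 3.x and moved to the xphoto contrib module in OpenCV 4):

cv::Mat hdr;  // CV_32FC3 HDR input, loaded elsewhere
cv::Ptr<cv::TonemapDurand> tm = cv::createTonemapDurand(2.2f, 4.0f, 1.0f, 2.0f, 2.0f);
cv::Mat ldr;
tm->process(hdr, ldr);  // output is float in [0, 1]; scale by 255 before saving as 8-bit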
Example #6
KDvoid Smoothing ( KDint nIdx )
{
	Mat		tSrc;
	Mat		tDst;

	KDint	MAX_KERNEL_LENGTH = 9;	// 31;
	KDint	i;

	// Load the source image
	tSrc = imread ( "/res/image/lena.jpg", 1 ); 
	tDst = tSrc.clone ( );

	for ( i = 1; i < MAX_KERNEL_LENGTH; i = i + 2 )
	{
		switch ( nIdx )
		{
			case 0 : blur ( tSrc, tDst, Size ( i, i ), Point ( -1, -1 ) );	break; // Applying Homogeneous blur 
			case 1 : GaussianBlur ( tSrc, tDst, Size ( i, i ), 0, 0 );		break; // Applying Gaussian blur 
			case 2 : medianBlur ( tSrc, tDst, i );							break; // Applying Median blur
			case 3 : bilateralFilter ( tSrc, tDst, i, i * 2, i / 2 );		break; // Applying Bilateral Filter
		}
	}

	g_pController->setFrame ( 1, tSrc );
	g_pController->setFrame ( 2, tDst );
}
Example #7
ImageImPro* OpenImProLib_OpenCvImpl::filterBilateral(ImageImPro* ptrInput, int winDiameter, double sigmaRange, double sigmaSpace){
    Mat* ptrMatInput = ptrInput->getMat();
    Mat* ptrMatOutput = ptrInput->getMat();
    // bilateralFilter does not work in place: this code relies on getMat()
    // returning a fresh copy on each call, so input and output are distinct buffers.
    bilateralFilter( *ptrMatInput, *ptrMatOutput, winDiameter, sigmaRange, sigmaSpace);
    ImageImPro* ptrOutput = new ImageImPro_OpenCvImpl(ptrMatOutput);
    delete ptrMatInput;
    delete ptrMatOutput;
    return ptrOutput;
}
Example #8
void bilateral(Mat& frame) {
	Mat tmp;
	for (int i = 0; i < 2; ++i) {
		pyrDown(frame, frame, Size(frame.cols / 2, frame.rows / 2));
	}
	frame.copyTo(tmp); // bilateralFilter needs a distinct, non-empty source buffer
	bilateralFilter(tmp, frame, 3, 3, 3);
	for (int i = 0; i < 2; ++i) {
		pyrUp(frame, frame, Size(frame.cols * 2, frame.rows * 2));
	}
}
Example #9
void bilateralSatured(Mat& frame) {
	Mat tmp;
	for (int i = 0; i < 2; ++i) {
		pyrDown(frame, frame, Size(frame.cols / 2, frame.rows / 2));
	}
	saturar(frame, tmp, 55);
	bilateralFilter(tmp, frame, 3, 3, 3);
	for (int i = 0; i < 2; ++i) {
		pyrUp(frame, frame, Size(frame.cols * 2, frame.rows * 2));
	}
}
Example #10
  bool BlurBlock::run(bool oneShot){
    Mat imgSrc = _myInputs["BLOCK__BLUR_IN_IMG"].get<cv::Mat>(),
      imgOut;

    switch (_myInputs["BLOCK__BLUR_IN_METHOD"].get<int>())
    {
    case 0://Mean
    {
      blur(imgSrc, imgOut, 
        cv::Size(_mySubParams["BLOCK__BLUR_IN_METHOD.Mean.kernel size X"].get<int>(),
        _mySubParams["BLOCK__BLUR_IN_METHOD.Mean.kernel size Y"].get<int>()),
        cv::Point(_mySubParams["BLOCK__BLUR_IN_METHOD.Mean.anchor point X"].get<int>(),
        _mySubParams["BLOCK__BLUR_IN_METHOD.Mean.anchor point Y"].get<int>()),
        cv::BORDER_DEFAULT);
      break;
    }
    case 1://Gaussian
    {
      cv::Size ksize(_mySubParams["BLOCK__BLUR_IN_METHOD.Gaussian.kernel size X"].get<int>(),
        _mySubParams["BLOCK__BLUR_IN_METHOD.Gaussian.kernel size Y"].get<int>());
      if (ksize.width <= 0) ksize.width = 1;
      if (ksize.width % 2 == 0) ksize.width += 1;
      if (ksize.height <= 0) ksize.height = 1;
      if (ksize.height % 2 == 0) ksize.height += 1;

      GaussianBlur(imgSrc, imgOut, ksize,
        _mySubParams["BLOCK__BLUR_IN_METHOD.Gaussian.Sigma X"].get<double>(),
        _mySubParams["BLOCK__BLUR_IN_METHOD.Gaussian.Sigma Y"].get<double>(),
        cv::BORDER_DEFAULT);
      break;
    }
    case 2://Median
    {
      int medianSize = _mySubParams["BLOCK__BLUR_IN_METHOD.Median.kernel size"].get<int>();
      if (medianSize % 2 != 1)
        medianSize += 1;
      medianBlur(imgSrc, imgOut, medianSize);
      break;
    }
    case 3://Bilateral
    {
      bilateralFilter(imgSrc, imgOut, _mySubParams["BLOCK__BLUR_IN_METHOD.Bilateral.Diameter"].get<int>(),
        _mySubParams["BLOCK__BLUR_IN_METHOD.Bilateral.Sigma color"].get<double>(),
        _mySubParams["BLOCK__BLUR_IN_METHOD.Bilateral.Sigma space"].get<double>());
      break;
    }
    default:
      return false;//nothing to do as we don't support this type of operation
      break;
    }
    _myOutputs["BLOCK__BLUR_OUT_IMAGE"] = imgOut;

    return true;
  }
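The kernel-size normalization in the Gaussian and Median cases matters because GaussianBlur requires odd, positive kernel dimensions (or zero, to derive them from sigma) and medianBlur requires an odd size greater than 1. A small stand-alone helper capturing the same rule (hypothetical name):

// Clamp a requested kernel size to the nearest valid odd value.
static int makeOddKernelSize(int requested) {
  if (requested <= 0) return 1;
  return (requested % 2 == 0) ? requested + 1 : requested;
}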
Example #11
void Filter::applyFilter()
{
    configureSpinBox();
    switch(currentFilter){
    case FILTER_HOMOGENEOUS:
        blur(originalImage, image, ksize);
        ui->filteredImage->setPixmap(ImageHandler::getQPixmap(image));
        break;
    case FILTER_GAUSSIAN:
        GaussianBlur(originalImage, image, ksize, 0, 0);
        ui->filteredImage->setPixmap(ImageHandler::getQPixmap(image));
        break;
    case FILTER_MEDIAN:
        medianBlur(originalImage, image, ksize.height);
        ui->filteredImage->setPixmap(ImageHandler::getQPixmap(image));
        break;
    case FILTER_BILATERAL:
        bilateralFilter(originalImage, image, 5, sigma, sigma);
        ui->filteredImage->setPixmap(ImageHandler::getQPixmap(image));
        break;
    }
}
Example #12
int runFilter(int argc, char *argv[])
{
 char * filenameInput=argv[1];
 char * filenameOutput=argv[2];
 unsigned int inputType = guessFilenameTypeStupid(filenameInput);
 struct Image * inputImage = readImage(filenameInput,inputType,0);
 struct Image * outputImage = 0; //This will get allocated when and if needed

 if (inputImage!=0)
 {
    unsigned int outputType = guessFilenameTypeStupid(filenameOutput);
    unsigned int i=0;
      for (i=0; i<argc; i++)
      {

        if ( strcmp(argv[i],"--learn")==0 )
        {
          destroyImage(inputImage);
          learnImage(filenameInput,atoi(argv[i+1]),atoi(argv[i+2]));
          exit(0);
        } else
        if ( strcmp(argv[i],"--rgbcube")==0 )
        {
          unsigned int dim=32;
          unsigned char R = (unsigned char) atoi(argv[i+1]);
          unsigned char G = (unsigned char) atoi(argv[i+2]);
          unsigned char B = (unsigned char) atoi(argv[i+3]);

          outputImage = createImage( dim , dim , 3 , 8 );


           bitbltColorRGB(outputImage->pixels ,   0  ,  0 , dim , dim ,  R ,  G ,  B , dim-1 , dim-1);

           writeImageFile(outputImage,PPM_CODEC ,"new_mX.pnm");
           writeImageFile(outputImage,PPM_CODEC ,"new_pX.pnm");
           writeImageFile(outputImage,PPM_CODEC ,"new_mY.pnm");
           writeImageFile(outputImage,PPM_CODEC ,"new_pY.pnm");
           writeImageFile(outputImage,PPM_CODEC ,"new_mZ.pnm");
           writeImageFile(outputImage,PPM_CODEC ,"new_pZ.pnm");
          destroyImage(outputImage);

        } else
        if ( strcmp(argv[i],"--envcube")==0 )
        {
          fprintf(stdout,"Converting Environment Cube \n");
          unsigned int outputType = guessFilenameTypeStupid(filenameOutput);
          //outputImage = createSameDimensionsImage(inputImage);

          unsigned int outputWidth = inputImage->width;
          unsigned int outputHeight = (unsigned int ) (3*inputImage->width)/4;
          outputImage = createImage( outputWidth , outputHeight , 3 , 8 );

         createCubeMapFace(  outputImage->pixels ,  outputImage->width , outputImage->height , outputImage->channels , outputImage->bitsperpixel ,
                             inputImage->pixels ,  inputImage->width , inputImage->height , inputImage->channels , inputImage->bitsperpixel
                          );


         struct Image * partImg=0;
         unsigned int outX=0 , outY=0 , outWidth=0 , outHeight=0;
         getCubeMap2DCoords(outputWidth,outputHeight, /*x*/ -1 , /*y*/  0  , /*z*/  0 , &outX , &outY , &outWidth , &outHeight );
         partImg=createImageBitBlt( outputImage , outX , outY , outWidth , outHeight );
         writeImageFile(partImg,PPM_CODEC ,"new_mX.pnm"); destroyImage(partImg);

         getCubeMap2DCoords(outputWidth,outputHeight, /*x*/  1 , /*y*/  0  , /*z*/  0 , &outX , &outY , &outWidth , &outHeight );
         partImg=createImageBitBlt( outputImage , outX , outY , outWidth , outHeight );
         writeImageFile(partImg,PPM_CODEC ,"new_pX.pnm"); destroyImage(partImg);

         getCubeMap2DCoords(outputWidth,outputHeight, /*x*/  0 , /*y*/ -1  , /*z*/  0 , &outX , &outY , &outWidth , &outHeight );
         partImg=createImageBitBlt( outputImage , outX , outY , outWidth , outHeight );
         writeImageFile(partImg,PPM_CODEC ,"new_mY.pnm"); destroyImage(partImg);

         getCubeMap2DCoords(outputWidth,outputHeight, /*x*/  0 , /*y*/  1  , /*z*/  0 , &outX , &outY , &outWidth , &outHeight );
         partImg=createImageBitBlt( outputImage , outX , outY , outWidth , outHeight );
         writeImageFile(partImg,PPM_CODEC ,"new_pY.pnm"); destroyImage(partImg);

         getCubeMap2DCoords(outputWidth,outputHeight, /*x*/  0 , /*y*/  0  , /*z*/ -1 , &outX , &outY , &outWidth , &outHeight );
         partImg=createImageBitBlt( outputImage , outX , outY , outWidth , outHeight );
         writeImageFile(partImg,PPM_CODEC ,"new_mZ.pnm"); destroyImage(partImg);

         getCubeMap2DCoords(outputWidth,outputHeight, /*x*/  0 , /*y*/  0  , /*z*/  1 , &outX , &outY , &outWidth , &outHeight );
         partImg=createImageBitBlt( outputImage , outX , outY , outWidth , outHeight );
         writeImageFile(partImg,PPM_CODEC ,"new_pZ.pnm"); destroyImage(partImg);
        } else
        if ( strcmp(argv[i],"--compare")==0 )
        {
          unsigned int outputType = guessFilenameTypeStupid(filenameOutput);
          outputImage = readImage(filenameOutput,outputType ,0);

          float noise = calculatePSNR( outputImage->pixels ,  outputImage->width , outputImage->height , outputImage->channels ,
                                       inputImage->pixels ,  inputImage->width , inputImage->height , inputImage->channels );

           fprintf(stdout,"Compared Detected Noise is %0.4f dB \n",noise);
           exit(0);
        } else
        if ( strcmp(argv[i],"--gaussian")==0 )
        {
          monochrome(inputImage);
          outputImage = createSameDimensionsImage(inputImage);

         unsigned int normalizeGaussianKernel=1;
         unsigned int kernelWidth=5;
         unsigned int kernelHeight=5;
         float * convolutionMatrix=allocateGaussianKernel(kernelWidth,kernelHeight,normalizeGaussianKernel);
         float divisor=1.0;

         float * inF = copyUCharImage2Float(inputImage->pixels ,  inputImage->width , inputImage->height , inputImage->channels );
         float * outF = (float*) malloc(sizeof(float) *  outputImage->width * outputImage->height *  outputImage->channels );


         convolutionFilter1ChF(
                                 outF ,  outputImage->width , outputImage->height ,
                                 inF,  inputImage->width , inputImage->height ,
                                 convolutionMatrix , kernelWidth , kernelHeight , &divisor
                              );


         free(convolutionMatrix);

         castFloatImage2UChar(outputImage->pixels, outF, outputImage->width , outputImage->height ,  outputImage->channels );
         free(inF);
         free(outF);
        } else
        if ( strcmp(argv[i],"--ctbilateral")==0 )
        {
          monochrome(inputImage);
          outputImage = createSameDimensionsImage(inputImage);

          float sigma = atof(argv[i+1]);
          constantTimeBilateralFilter(
                                       inputImage->pixels  ,  inputImage->width , inputImage->height , inputImage->channels ,
                                       outputImage->pixels ,  outputImage->width , outputImage->height
                                      ,&sigma //sigma
                                      ,atoi(argv[i+2]) //bins
                                      ,atoi(argv[i+3]) //useDeriche
                                     );

        } else
        if ( strcmp(argv[i],"--deriche")==0 )
        {
          monochrome(inputImage);
          outputImage = createSameDimensionsImage(inputImage);
          float sigma = atof(argv[i+1]);
          dericheRecursiveGaussianGray( outputImage->pixels ,  outputImage->width , outputImage->height , inputImage->channels ,
                                        inputImage->pixels ,  inputImage->width , inputImage->height ,
                                        &sigma , atoi(argv[i+2])
                                       );
        } else
        if ( strcmp(argv[i],"--dericheF")==0 )
        {
          fprintf(stderr,"This is a test call for casting code , this shouldnt be normally used..\n");
          monochrome(inputImage);
          outputImage = createSameDimensionsImage(inputImage);
          float sigma = atof(argv[i+1]);

          //outputImage = copyImage(inputImage);
         float * inF = copyUCharImage2Float(inputImage->pixels ,  inputImage->width , inputImage->height , inputImage->channels );
         float * outF = (float*) malloc(sizeof(float) *  outputImage->width * outputImage->height *  outputImage->channels );

         dericheRecursiveGaussianGrayF(  outF  ,  outputImage->width , outputImage->height ,  inputImage->channels ,
                                         inF ,  inputImage->width , inputImage->height  ,
                                         &sigma , atoi(argv[i+2])
                                        );

         castFloatImage2UChar(outputImage->pixels, outF, outputImage->width , outputImage->height ,  outputImage->channels );
         free(inF);
         free(outF);
        } else
        if ( strcmp(argv[i],"--median")==0 )
        {
           outputImage = copyImage(inputImage);
           medianFilter3ch(
                         outputImage->pixels ,  outputImage->width , outputImage->height ,
                         inputImage->pixels ,  inputImage->width , inputImage->height  ,
                         atoi(argv[i+1]) , atoi(argv[i+2])
                        );
        } else
        if ( strcmp(argv[i],"--meansat")==0 )
        {
           outputImage = copyImage(inputImage);
           meanFilterSAT(
                         outputImage->pixels ,  outputImage->width , outputImage->height , outputImage->channels ,
                         inputImage->pixels ,  inputImage->width , inputImage->height , inputImage->channels ,
                         atoi(argv[i+1]) , atoi(argv[i+2])
                        );
        } else
        if ( strcmp(argv[i],"--monochrome")==0 )
        {
          outputImage = copyImage(inputImage);
          monochrome(outputImage);
        } else
        if ( strcmp(argv[i],"--bilateral")==0 )
        {
          outputImage = copyImage(inputImage);
          bilateralFilter( outputImage->pixels ,  outputImage->width , outputImage->height ,
                           inputImage->pixels ,  inputImage->width , inputImage->height ,
                            atof(argv[i+1]) , atof(argv[i+2]) , atoi(argv[i+3])
                         );
        } else
        if ( strcmp(argv[i],"--contrast")==0 )
        {
          outputImage = copyImage(inputImage);
          contrast(outputImage,atof(argv[i+1]));
        } else
        if ( strcmp(argv[i],"--sattest")==0 )
        {
            float * tmp = allocateGaussianKernel(3,5.0,1);
            if (tmp!=0) { free(tmp); }

            tmp = allocateGaussianKernel(9,5.0,1);
            if (tmp!=0) { free(tmp); }


            tmp = allocateGaussianKernel(15,5.0,1);
            if (tmp!=0) { free(tmp); }


            summedAreaTableTest();
            unsigned int * integralImageOutput = 0;
            integralImageOutput = generateSummedAreaTableRGB(inputImage->pixels ,  inputImage->width , inputImage->height);
            if (integralImageOutput!=0)
            {
              free(integralImageOutput);
              fprintf(stderr,"integralImage test was successful\n");
            }
        }
      }

    writeImageFile(outputImage,outputType ,filenameOutput);
    destroyImage(outputImage);
    destroyImage(inputImage);
    return 1;
 }
 return 0;
}
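Given the argument parsing above, a hypothetical invocation of this tool (binary name assumed) would be:

./imagefilter input.pnm output.pnm --bilateral 9.0 30.0 1

where the two atof() values and the final atoi() value feed the three parameters of this project's bilateralFilter implementation.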
Example #13
void RGBDCamera::update(const RawFrame* this_frame) {
  //Check the timestamp, and skip if we have already seen this frame
  if (this_frame->timestamp <= latest_stamp_) {
    return;
  } else {
    latest_stamp_ = this_frame->timestamp;
  }

  //Apply bilateral filter to incoming depth (a project CUDA kernel, not cv::bilateralFilter)
  uint16_t* filtered_depth;
  cudaMalloc((void**)&filtered_depth, this_frame->width*this_frame->height*sizeof(uint16_t));
  bilateralFilter(this_frame->depth, filtered_depth, this_frame->width, this_frame->height);

  //Convert the input color data to intensity
  float* temp_intensity;
  cudaMalloc((void**)&temp_intensity, this_frame->width*this_frame->height*sizeof(float));
  colorToIntensity(this_frame->color, temp_intensity, this_frame->width*this_frame->height);

  //Create pyramids
  for (int i = 0; i < PYRAMID_DEPTH; i++) {
    //Fill in sizes the first two times through
    if (pass_ < 2) {
      current_icp_frame_[i] = new ICPFrame(this_frame->width/pow(2,i), this_frame->height/pow(2,i));
      current_rgbd_frame_[i] = new RGBDFrame(this_frame->width/pow(2,i), this_frame->height/pow(2,i));
    }

    //Add ICP data
    generateVertexMap(filtered_depth, current_icp_frame_[i]->vertex, current_icp_frame_[i]->width, current_icp_frame_[i]->height, focal_length_, make_int2(this_frame->width, this_frame->height));
    generateNormalMap(current_icp_frame_[i]->vertex, current_icp_frame_[i]->normal, current_icp_frame_[i]->width, current_icp_frame_[i]->height);

    //Add RGBD data
    cudaMemcpy(current_rgbd_frame_[i]->vertex, current_icp_frame_[i]->vertex, current_rgbd_frame_[i]->width*current_rgbd_frame_[i]->height*sizeof(glm::vec3), cudaMemcpyDeviceToDevice);
    cudaMemcpy(current_rgbd_frame_[i]->intensity, temp_intensity, current_rgbd_frame_[i]->width*current_rgbd_frame_[i]->height*sizeof(float), cudaMemcpyDeviceToDevice);

    //Downsample depth and color if not the last iteration
    if (i != (PYRAMID_DEPTH-1)) {
      subsampleDepth(filtered_depth, current_icp_frame_[i]->width, current_icp_frame_[i]->height);
      subsample(temp_intensity, current_rgbd_frame_[i]->width, current_rgbd_frame_[i]->height);
      cudaDeviceSynchronize();
    }
  }

  //Clear the filtered depth and temporary color since they are no longer needed
  cudaFree(filtered_depth);
  cudaFree(temp_intensity);

  if (pass_ >= 1) {
    glm::mat4 update_trans(1.0f);

    //Loop through pyramids backwards (coarse first)
    for (int i = PYRAMID_DEPTH - 1; i >= 0; i--) {

      //Get a copy of the ICP frame for this pyramid level
      ICPFrame icp_f(current_icp_frame_[i]->width, current_icp_frame_[i]->height);
      cudaMemcpy(icp_f.vertex, current_icp_frame_[i]->vertex, icp_f.width*icp_f.height*sizeof(glm::vec3), cudaMemcpyDeviceToDevice);
      cudaMemcpy(icp_f.normal, current_icp_frame_[i]->normal, icp_f.width*icp_f.height*sizeof(glm::vec3), cudaMemcpyDeviceToDevice);

      //Get a copy of the RGBD frame for this pyramid level
      //RGBDFrame rgbd_f(current_rgbd_frame_[i]->width, current_rgbd_frame_[i]->height);
      //cudaMemcpy(rgbd_f.vertex, current_rgbd_frame_[i]->vertex, rgbd_f.width*rgbd_f.height*sizeof(glm::vec3), cudaMemcpyDeviceToDevice);
      //cudaMemcpy(rgbd_f.intensity, current_rgbd_frame_[i]->intensity, rgbd_f.width*rgbd_f.height*sizeof(float), cudaMemcpyDeviceToDevice);

      //Apply the most recent update to the points/normals
      if (i < (PYRAMID_DEPTH-1)) {
        transformVertexMap(icp_f.vertex, update_trans, icp_f.width*icp_f.height);
        transformNormalMap(icp_f.normal, update_trans, icp_f.width*icp_f.height);
        cudaDeviceSynchronize();
      }

      //Loop through iterations
      for (int j = 0; j < PYRAMID_ITERS[i]; j++) {

        //Get the Geometric ICP cost values
        float A1[6 * 6];
        float b1[6];
        computeICPCost2(last_icp_frame_[i], icp_f, A1, b1);

        //Get the Photometric RGB-D cost values
        //float A2[6*6];
        //float b2[6];
        //compueRGBDCost(last_rgbd_frame_, rgbd_f, A2, b2);

        //Combine the two
        //for (size_t k = 0; k < 6; k++) {
          //for (size_t l = 0; l < 6; l++) {
            //A1[6 * k + l] += A2[6 * k + l];
          //}
          //b1[k] += b2[k];
        //}

        //Solve for the optimized camera transformation
        float x[6];
        solveCholesky(6, A1, b1, x);

        //Check for NaN/divergence
        if (isnan(x[0]) || isnan(x[1]) || isnan(x[2]) || isnan(x[3]) || isnan(x[4]) || isnan(x[5])) {
          printf("Camera tracking is lost.\n");
          break;
        }

        //Update position/orientation of the camera
        glm::mat4 this_trans = 
            glm::rotate(glm::mat4(1.0f), -x[2] * 180.0f / 3.14159f, glm::vec3(0.0f, 0.0f, 1.0f)) 
          * glm::rotate(glm::mat4(1.0f), -x[1] * 180.0f / 3.14159f, glm::vec3(0.0f, 1.0f, 0.0f))
          * glm::rotate(glm::mat4(1.0f), -x[0] * 180.0f / 3.14159f, glm::vec3(1.0f, 0.0f, 0.0f)) 
          * glm::translate(glm::mat4(1.0f), glm::vec3(x[3], x[4], x[5]));

        update_trans = this_trans * update_trans;
        
        //Apply the update to the points/normals
        if (j < (PYRAMID_ITERS[i] - 1)) {
          transformVertexMap(icp_f.vertex, this_trans, icp_f.width*icp_f.height);
          transformNormalMap(icp_f.normal, this_trans, icp_f.width*icp_f.height);
          cudaDeviceSynchronize();
        }

      }
    }
    //Update the global transform with the result
    position_ = glm::vec3(glm::vec4(position_, 1.0f) * update_trans);
    orientation_ = glm::mat3(glm::mat4(orientation_) * update_trans);
  }

  if (pass_ < 2) {
    pass_++;
  }

  //Swap current and last frames
  for (int i = 0; i < PYRAMID_DEPTH; i++) {
    ICPFrame* temp = current_icp_frame_[i];
    current_icp_frame_[i] = last_icp_frame_[i];
    last_icp_frame_[i] = temp;
    //TODO: Longterm, only RGBD should do this. ICP should not swap, as last_frame should be updated by a different function
    RGBDFrame* temp2 = current_rgbd_frame_[i];
    current_rgbd_frame_[i] = last_rgbd_frame_[i];
    last_rgbd_frame_[i] = temp2;
  }

}
Example #14
int opencv::ProcessImg(Mat& src)
{
	try{
		if (src.empty()) return 1;
		Mat color, gray, binimg, bingray;
		Convert2BGR(src, color);
		Convert2GRAY(src, gray); Convert2GRAY(src, bingray);

		exColor_Invert(gray);

		vector<vector<Point>> contours;

		int trd = get_OTSU_value(gray);

		threshold(bingray, binimg, trd, 255, THRESH_BINARY);



		Mat Fiter;

#if 1
		// Bilateral filter
		bilateralFilter(gray, Fiter, 15, 50, 2.0, BORDER_DEFAULT);

#else

		// Median filter
		int kernelSize = gray.rows / 150;
		if (0 == kernelSize % 2) kernelSize += 1;
		kernelSize = kernelSize > 2 ? kernelSize : 3;
		medianBlur(gray, gray, kernelSize);
#endif
		//Histogram(gray, gray);

		findContours(binimg, contours, CV_RETR_CCOMP, CV_CHAIN_APPROX_NONE, cvPoint(0, 0));

		Mat imgmask(binimg.rows, binimg.cols, CV_8UC1, Scalar(255));
		//drawContours(imgmask, contours, -1, Scalar(255), CV_FILLED/*2*/);   // -1 means all contours
		//drawContours(imgmask, contours, -1, Scalar(0), 2);   // -1 means all contours
#ifdef _DEBUG
		for (size_t i = 0; i < contours.size(); i++)
			cout << contours[i].size()<<endl;
#endif
		get_mask_image(src, imgmask, contours, -1,1,2);
		src = imgmask.clone();
		return 1;

		// NOTE: everything below is unreachable because of the return above.
		Convert2GRAY(gray, gray);
		Mat edge;
		Canny(gray, edge, 0, 255, 5);
		src = edge.clone();
		return NoError;


		// Pattern recognition
	//	ANN_MLP ann;
	//	ann.train();




	}
	catch (...)
	{
#ifdef _DEBUG
		cout << "img deal wrongly" << endl;

#endif
		return -1;
	}
}
Example #15
void tmo_durand02(pfs::Array2Df& R, pfs::Array2Df& G, pfs::Array2Df& B,
                  float sigma_s, float sigma_r, float baseContrast, int downsample,
                  bool color_correction,
                  pfs::Progress &ph)
{
    int w = R.getCols();
    int h = R.getRows();
    int size = w*h;

    pfs::Array2Df I(w,h); // intensities
    pfs::Array2Df BASE(w,h); // base layer
    pfs::Array2Df DETAIL(w,h); // detail layer

    float min_pos = 1e10f; // minimum positive value (to avoid log(0))
    for (int i = 0 ; i < size ; i++)
    {
        I(i) = 1.0f/61.0f * ( 20.0f*R(i) + 40.0f*G(i) + B(i) );
        if ( I(i) < min_pos && I(i) > 0.0f )
        {
            min_pos = I(i);
        }
    }

    for (int i = 0 ; i < size ; i++)
    {
        float L = I(i);
        if ( L <= 0.0f )
        {
            L = min_pos;
        }

        R(i) /= L;
        G(i) /= L;
        B(i) /= L;

        I(i) = std::log( L );
    }

#ifdef HAVE_FFTW3F
    fastBilateralFilter( I, BASE, sigma_s, sigma_r, downsample, ph );
#else
    bilateralFilter( &I, &BASE, sigma_s, sigma_r, ph );
#endif

    //!! FIX: find minimum and maximum luminance, but skip 1% of outliers
    float maxB;
    float minB;
    findMaxMinPercentile(&BASE, 0.01f, 0.99f, minB, maxB);

    float compressionfactor = baseContrast / (maxB - minB);

    // Color correction factor
    const float k1 = 1.48f;
    const float k2 = 0.82f;
    const float s = ( (1 + k1)*pow(compressionfactor,k2) )/( 1 + k1*pow(compressionfactor,k2) );

    for (int i = 0 ; i < size ; i++)
    {
        DETAIL(i) = I(i) - BASE(i);
        I(i) = BASE(i) * compressionfactor + DETAIL(i);

        //!! FIX: this to keep the output in normalized range 0.01 - 1.0
        //intensities are related only to minimum luminance because I
        //would say this is more stable over time than using maximum
        //luminance and is also robust against random peaks of very high
        //luminance
        I(i) -=  4.3f+minB*compressionfactor;

        if ( color_correction )
        {
            R(i) = decode( std::pow( R(i), s ) *  std::exp( I(i) ) );
            G(i) = decode( std::pow( G(i), s ) *  std::exp( I(i) ) );
            B(i) = decode( std::pow( B(i), s ) *  std::exp( I(i) ) );
        }
        else
        {
            R(i) *= decode( std::exp( I(i) ) );
            G(i) *= decode( std::exp( I(i) ) );
            B(i) *= decode( std::exp( I(i) ) );
        }
    }

    if (!ph.canceled())
    {
        ph.setValue( 100 );
    }
}
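In symbols, the final loop implements the Durand-style base/detail decomposition: DETAIL = log L - BASE and log L' = BASE * compressionfactor + DETAIL, with compressionfactor = baseContrast / (maxB - minB). Only the bilateral-filtered base layer is compressed, which is why local detail survives the dynamic-range reduction.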
Example #16
  void PlateLines::processImage(Mat inputImage, vector<TextLine> textLines, float sensitivity)
  {
    if (this->debug)
      cout << "PlateLines findLines" << endl;

    timespec startTime;
    getTimeMonotonic(&startTime);


    // Ignore input images that are pure white or pure black
    Scalar avgPixelIntensity = mean(inputImage);
    if (avgPixelIntensity[0] >= 252)
      return;
    else if (avgPixelIntensity[0] <= 3)
      return;

    // Do a bilateral filter to clean the noise but keep edges sharp
    Mat smoothed(inputImage.size(), inputImage.type());
    bilateralFilter(inputImage, smoothed, 3, 45, 45);


    int morph_elem  = 2;
    int morph_size = 2;
    Mat element = getStructuringElement( morph_elem, Size( 2*morph_size + 1, 2*morph_size+1 ), Point( morph_size, morph_size ) );


    Mat edges(inputImage.size(), inputImage.type());
    Canny(smoothed, edges, 66, 133);

    // Create a mask that is dilated based on the detected characters


    Mat mask = Mat::zeros(inputImage.size(), CV_8U);

    for (unsigned int i = 0; i < textLines.size(); i++)
    {
      vector<vector<Point> > polygons;
      polygons.push_back(textLines[i].textArea);
      fillPoly(mask, polygons, Scalar(255,255,255));
    }



    dilate(mask, mask, getStructuringElement( 1, Size( 1 + 1, 2*1+1 ), Point( 1, 1 ) ));
    bitwise_not(mask, mask);

    // AND canny edges with the character mask
    bitwise_and(edges, mask, edges);


    vector<PlateLine> hlines = this->getLines(edges, sensitivity, false);
    vector<PlateLine> vlines = this->getLines(edges, sensitivity, true);
    for (unsigned int i = 0; i < hlines.size(); i++)
      this->horizontalLines.push_back(hlines[i]);
    for (unsigned int i = 0; i < vlines.size(); i++)
      this->verticalLines.push_back(vlines[i]);

    // if debug is enabled, draw the image
    if (this->debug)
    {
      Mat debugImgHoriz(edges.size(), edges.type());
      Mat debugImgVert(edges.size(), edges.type());
      edges.copyTo(debugImgHoriz);
      edges.copyTo(debugImgVert);
      cvtColor(debugImgHoriz,debugImgHoriz,CV_GRAY2BGR);
      cvtColor(debugImgVert,debugImgVert,CV_GRAY2BGR);

      for( size_t i = 0; i < this->horizontalLines.size(); i++ )
      {
        line( debugImgHoriz, this->horizontalLines[i].line.p1, this->horizontalLines[i].line.p2, Scalar(0,0,255), 1, CV_AA);
      }

      for( size_t i = 0; i < this->verticalLines.size(); i++ )
      {
        line( debugImgVert, this->verticalLines[i].line.p1, this->verticalLines[i].line.p2, Scalar(0,0,255), 1, CV_AA);
      }

      vector<Mat> images;
      images.push_back(debugImgHoriz);
      images.push_back(debugImgVert);

      Mat dashboard = drawImageDashboard(images, debugImgVert.type(), 1);
      displayImage(pipelineData->config, "Hough Lines", dashboard);
    }

    if (pipelineData->config->debugTiming)
    {
      timespec endTime;
      getTimeMonotonic(&endTime);
      cout << "Plate Lines Time: " << diffclock(startTime, endTime) << "ms." << endl;
    }

  }
Example #17
// Create a grayscale face image that has a standard size, contrast and brightness.
// "srcImg" should be a copy of the whole color camera frame, so that the eye positions can be drawn onto it.
// If 'doLeftAndRightSeparately' is true, it will process the left & right sides separately,
// so that if there is a strong light on one side but not the other, the result will still look OK.
// Performs face preprocessing as a combination of:
//  - geometrical scaling, rotation and translation using eye detection,
//  - smoothing away image noise using a bilateral filter,
//  - standardizing the brightness on the left and right sides of the face independently using separate histogram equalization,
//  - removal of background and hair using an elliptical mask.
// Returns either a preprocessed face square image or NULL (ie: couldn't detect the face and 2 eyes).
// If a face is found, it can store the rect coordinates into 'storeFaceRect' and 'storeLeftEye' & 'storeRightEye' if given,
// and eye search regions into 'searchedLeftEye' & 'searchedRightEye' if given.
cv::Mat preprocessFace::getPreprocessedFace(cv::Mat &srcImg, int desiredFaceWidth, cv::CascadeClassifier &faceCascade, cv::CascadeClassifier &eyeCascade1, cv::CascadeClassifier &eyeCascade2, bool doLeftAndRightSeparately, cv::Rect *storeFaceRect, cv::Point *storeLeftEye, cv::Point *storeRightEye, cv::Rect *searchedLeftEye, cv::Rect *searchedRightEye)
{
	// Use square faces.
	int desiredFaceHeight = desiredFaceWidth;

	// Mark the detected face region and eye search regions as invalid, in case they aren't detected.
	if (storeFaceRect)
		storeFaceRect->width = -1;
	if (storeLeftEye)
		storeLeftEye->x = -1;
	if (storeRightEye)
		storeRightEye->x = -1;
	if (searchedLeftEye)
		searchedLeftEye->width = -1;
	if (searchedRightEye)
		searchedRightEye->width = -1;

	// Find the largest face.
	cv::Rect faceRect;
	detector.detectLargestObject(srcImg, faceCascade, faceRect);

	// Check if a face was detected.
	if (faceRect.width > 0) {

		// Give the face rect to the caller if desired.
		if (storeFaceRect)
			*storeFaceRect = faceRect;

		cv::Mat faceImg = srcImg(faceRect);    // Get the detected face image.

		// If the input image is not grayscale, then convert the BGR or BGRA color image to grayscale.
		cv::Mat gray;
		if (faceImg.channels() == 3) {
			cvtColor(faceImg, gray, CV_BGR2GRAY);
		}
		else if (faceImg.channels() == 4) {
			cvtColor(faceImg, gray, CV_BGRA2GRAY);
		}
		else {
			// Access the input image directly, since it is already grayscale.
			gray = faceImg;
		}

		// Search for the 2 eyes at the full resolution, since eye detection needs max resolution possible!
		cv::Point leftEye, rightEye;
		detectBothEyes(gray, eyeCascade1, eyeCascade2, leftEye, rightEye, searchedLeftEye, searchedRightEye);

		// Give the eye results to the caller if desired.
		if (storeLeftEye)
			*storeLeftEye = leftEye;
		if (storeRightEye)
			*storeRightEye = rightEye;

		// Check if both eyes were detected.
		if (leftEye.x >= 0 && rightEye.x >= 0) 
		{
			inputFaceCount++;

			// Make the face image the same size as the training images.

			// Since we found both eyes, let's rotate, scale and translate the face so that the 2 eyes
			// line up perfectly with the ideal eye positions. This makes sure that eyes will be horizontal,
			// and not too far left or right of the face, etc.

			// Get the center between the 2 eyes.
			cv::Point2f eyesCenter = cv::Point2f((leftEye.x + rightEye.x) * 0.5f, (leftEye.y + rightEye.y) * 0.5f);
			// Get the angle between the 2 eyes.
			double dy = (rightEye.y - leftEye.y);
			double dx = (rightEye.x - leftEye.x);
			double len = sqrt(dx*dx + dy*dy);
			double angle = atan2(dy, dx) * 180.0 / CV_PI; // Convert from radians to degrees.

			// Hand measurements showed that the left eye center should ideally be at roughly (0.19, 0.14) of a scaled face image.
			const double DESIRED_RIGHT_EYE_X = (1.0f - DESIRED_LEFT_EYE_X);
			// Get the amount we need to scale the image to be the desired fixed size we want.
			double desiredLen = (DESIRED_RIGHT_EYE_X - DESIRED_LEFT_EYE_X) * desiredFaceWidth;
			double scale = desiredLen / len;
			// Get the transformation matrix for rotating and scaling the face to the desired angle & size.
			cv::Mat rot_mat = getRotationMatrix2D(eyesCenter, angle, scale);
			// Shift the center of the eyes to be the desired center between the eyes.
			rot_mat.at<double>(0, 2) += desiredFaceWidth * 0.5f - eyesCenter.x;
			rot_mat.at<double>(1, 2) += desiredFaceHeight * DESIRED_LEFT_EYE_Y - eyesCenter.y;

			// Rotate and scale and translate the image to the desired angle & size & position!
			// Note that we use 'w' for the height instead of 'h', because the input face has 1:1 aspect ratio.
			cv::Mat warped = cv::Mat(desiredFaceHeight, desiredFaceWidth, CV_8U, cv::Scalar(128)); // Clear the output image to a default grey.
			warpAffine(gray, warped, rot_mat, warped.size());
			//imshow("warped", warped);

			// Give the image a standard brightness and contrast, in case it was too dark or had low contrast.
			if (!doLeftAndRightSeparately) {
				// Do it on the whole face.
				equalizeHist(warped, warped);
			}
			else {
				// Do it separately for the left and right sides of the face.
				equalizeLeftAndRightHalves(warped);
			}
			//imshow("equalized", warped);

			// Use the "Bilateral Filter" to reduce pixel noise by smoothing the image, but keeping the sharp edges in the face.
			cv::Mat filtered = cv::Mat(warped.size(), CV_8U);
			bilateralFilter(warped, filtered, 0, 20.0, 2.0); // d = 0: diameter is computed from sigmaSpace
			//imshow("filtered", filtered);

			// Filter out the corners of the face, since we mainly just care about the middle parts.
			// Draw a filled ellipse in the middle of the face-sized image.
			cv::Mat mask = cv::Mat(warped.size(), CV_8U, cv::Scalar(0)); // Start with an empty mask.
			cv::Point faceCenter = cv::Point(desiredFaceWidth / 2, cvRound(desiredFaceHeight * FACE_ELLIPSE_CY));
			cv::Size size = cv::Size(cvRound(desiredFaceWidth * FACE_ELLIPSE_W), cvRound(desiredFaceHeight * FACE_ELLIPSE_H));
			ellipse(mask, faceCenter, size, 0, 0, 360, cv::Scalar(255), CV_FILLED);
			//imshow("mask", mask);

			// Use the mask, to remove outside pixels.
			cv::Mat dstImg = cv::Mat(warped.size(), CV_8U, cv::Scalar(128)); // Clear the output image to a default gray.
			/*
			namedWindow("filtered");
			imshow("filtered", filtered);
			namedWindow("dstImg");
			imshow("dstImg", dstImg);
			namedWindow("mask");
			imshow("mask", mask);
			*/
			// Apply the elliptical mask on the face.
			filtered.copyTo(dstImg, mask);  // Copies non-masked pixels from filtered to dstImg.
			//imshow("dstImg", dstImg);


			/*if (leftEye.x >= 0 && rightEye.x >= 0)
			{
				eyeRegion.x = 0;
				eyeRegion.y = searchedLeftEye.y;
				eyeRegion.width = preprocessedFace.rows;
				eyeRegion.height = 30;
				cv::rectangle(frame, eyeRegion, CV_RGB(255, 255, 0), 3);
			}*/

			if (needResults)
			{
				writeResults(srcImg, gray, dstImg);
			}
			
			return dstImg;
		}
		/*
		else {
		// Since no eyes were found, just do a generic image resize.
		resize(gray, tmpImg, Size(w,h));
		}
		*/
	}
	return cv::Mat();
}
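A hypothetical call, assuming a preprocessFace instance named pre, a camera frame in a cv::Mat named frame, and already-loaded cascade classifiers:

cv::Rect faceRect;
cv::Point leftEye, rightEye;
cv::Mat face = pre.getPreprocessedFace(frame, 70, faceCascade, eyeCascade1, eyeCascade2,
                                       true, &faceRect, &leftEye, &rightEye, NULL, NULL);
if (!face.empty()) {
    // face is a 70x70 equalized, bilateral-filtered, ellipse-masked grayscale image
}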
Example #18
 void CV_BilateralFilterTest::run_func()
 {
     bilateralFilter(_src, _parallel_dst, _d, _sigma_color, _sigma_space);
 }