Example #1
0
bool  PxMEncoder::write( const Mat& img, const std::vector<int>& params )
{
    // Encodes img as a PGM (1 channel) or PPM (3 channels) image, either
    // into m_buf (in-memory) or into m_filename. Default is the "raw"
    // binary PNM variant; CV_IMWRITE_PXM_BINARY in params selects
    // binary (non-zero) vs. plain-text ASCII (zero) output.
    bool isBinary = true;

    int  width = img.cols, height = img.rows;
    int  _channels = img.channels(), depth = (int)img.elemSize1()*8;
    // PNM supports only 1 or 3 channels: any multi-channel input is
    // written as 3-channel PPM.
    int  channels = _channels > 1 ? 3 : 1;
    int  fileStep = width*(int)img.elemSize();  // bytes per source row
    int  x, y;

    // params is a flat (key, value) pair list.
    for( size_t i = 0; i < params.size(); i += 2 )
        if( params[i] == CV_IMWRITE_PXM_BINARY )
            isBinary = params[i+1] != 0;

    WLByteStream strm;

    if( m_buf )
    {
        // In-memory destination: pre-reserve a worst-case size estimate
        // (header plus either raw rows or fixed-width ASCII samples).
        if( !strm.open(*m_buf) )
            return false;
        int t = CV_MAKETYPE(img.depth(), channels);
        m_buf->reserve( alignSize(256 + (isBinary ? fileStep*height :
            ((t == CV_8UC1 ? 4 : t == CV_8UC3 ? 4*3+2 :
            t == CV_16UC1 ? 6 : 6*3+2)*width+1)*height), 256));
    }
    else if( !strm.open(m_filename) )
        return false;

    // Scratch buffer: must fit the header and one encoded row.
    int  lineLength;
    int  bufferSize = 128; // buffer that should fit a header

    if( isBinary )
        lineLength = width * (int)img.elemSize();
    else
        lineLength = (6 * channels + (channels > 1 ? 2 : 0)) * width + 32;

    if( bufferSize < lineLength )
        bufferSize = lineLength;

    AutoBuffer<char> _buffer(bufferSize);
    char* buffer = _buffer;

    // write header;
    // Magic number: 'P2'/'P3' for ASCII gray/color, 'P5'/'P6' for binary,
    // followed by dimensions and the maximum sample value (255 or 65535).
    sprintf( buffer, "P%c\n%d %d\n%d\n",
             '2' + (channels > 1 ? 1 : 0) + (isBinary ? 3 : 0),
             width, height, (1 << depth) - 1 );

    strm.putBytes( buffer, (int)strlen(buffer) );

    for( y = 0; y < height; y++ )
    {
        const uchar* const data = img.ptr(y);
        if( isBinary )
        {
            // PPM stores RGB while cv::Mat stores BGR: reorder into buffer.
            if( _channels == 3 )
            {
                if( depth == 8 )
                    icvCvt_BGR2RGB_8u_C3R( (const uchar*)data, 0,
                        (uchar*)buffer, 0, cvSize(width,1) );
                else
                    icvCvt_BGR2RGB_16u_C3R( (const ushort*)data, 0,
                        (ushort*)buffer, 0, cvSize(width,1) );
            }

            // swap endianness if necessary
            // (binary PNM stores 16-bit samples most-significant byte first)
            if( depth == 16 && !isBigEndian() )
            {
                // Single-channel rows were not copied by the BGR2RGB step
                // above, so bring them into the scratch buffer first.
                if( _channels == 1 )
                    memcpy( buffer, data, fileStep );
                for( x = 0; x < width*channels*2; x += 2 )
                {
                    uchar v = buffer[x];
                    buffer[x] = buffer[x + 1];
                    buffer[x + 1] = v;
                }
            }
            // 8-bit single-channel rows need no conversion: write directly
            // from the image; everything else goes through the buffer.
            strm.putBytes( (channels > 1 || depth > 8) ? buffer : (const char*)data, fileStep );
        }
        else
        {
            // ASCII variant: format each sample as fixed-width decimal
            // ("% 4d" for 8-bit, "% 6d" for 16-bit), one image row per line.
            char* ptr = buffer;

            if( channels > 1 )
            {
                if( depth == 8 )
                {
                    for( x = 0; x < width*channels; x += channels )
                    {
                        // Emit in RGB order (indices 2,1,0 of BGR data).
                        sprintf( ptr, "% 4d", data[x + 2] );
                        ptr += 4;
                        sprintf( ptr, "% 4d", data[x + 1] );
                        ptr += 4;
                        sprintf( ptr, "% 4d", data[x] );
                        ptr += 4;
                        *ptr++ = ' ';
                        *ptr++ = ' ';
                    }
                }
                else
                {
                    for( x = 0; x < width*channels; x += channels )
                    {
                        sprintf( ptr, "% 6d", ((const ushort *)data)[x + 2] );
                        ptr += 6;
                        sprintf( ptr, "% 6d", ((const ushort *)data)[x + 1] );
                        ptr += 6;
                        sprintf( ptr, "% 6d", ((const ushort *)data)[x] );
                        ptr += 6;
                        *ptr++ = ' ';
                        *ptr++ = ' ';
                    }
                }
            }
            else
            {
                if( depth == 8 )
                {
                    for( x = 0; x < width; x++ )
                    {
                        sprintf( ptr, "% 4d", data[x] );
                        ptr += 4;
                    }
                }
                else
                {
                    for( x = 0; x < width; x++ )
                    {
                        sprintf( ptr, "% 6d", ((const ushort *)data)[x] );
                        ptr += 6;
                    }
                }
            }

            *ptr++ = '\n';

            strm.putBytes( buffer, (int)(ptr - buffer) );
        }
    }

    strm.close();
    return true;
}
Example #2
0
bool  PngEncoder::write( const Mat& img, const std::vector<int>& params )
{
    // Encodes img as PNG, either into m_buf (via writeDataToBuf/flushBuf
    // callbacks) or into m_filename. Only 8-bit and 16-bit images are
    // supported; 1/3/4 channels map to GRAY/RGB/RGBA.
    png_structp png_ptr = png_create_write_struct( PNG_LIBPNG_VER_STRING, 0, 0, 0 );
    png_infop info_ptr = 0;
    // volatile: these locals are read after a possible longjmp back to the
    // setjmp below, so they must not be cached in registers.
    FILE * volatile f = 0;
    int y, width = img.cols, height = img.rows;
    int depth = img.depth(), channels = img.channels();
    volatile bool result = false;
    AutoBuffer<uchar*> buffer;

    if( depth != CV_8U && depth != CV_16U )
        return false;

    if( png_ptr )
    {
        info_ptr = png_create_info_struct( png_ptr );

        if( info_ptr )
        {
            // libpng reports fatal errors by longjmp'ing back here; the
            // non-zero return path falls through to the cleanup below.
            if( setjmp( png_jmpbuf ( png_ptr ) ) == 0 )
            {
                if( m_buf )
                {
                    // In-memory output: route all writes into m_buf.
                    png_set_write_fn(png_ptr, this,
                        (png_rw_ptr)writeDataToBuf, (png_flush_ptr)flushBuf);
                }
                else
                {
                    f = fopen( m_filename.c_str(), "wb" );
                    if( f )
                        png_init_io( png_ptr, (png_FILE_p)f );
                }

                int compression_level = -1; // Invalid value to allow setting 0-9 as valid
                int compression_strategy = IMWRITE_PNG_STRATEGY_RLE; // Default strategy
                bool isBilevel = false;

                // params is a flat (key, value) pair list.
                for( size_t i = 0; i < params.size(); i += 2 )
                {
                    if( params[i] == IMWRITE_PNG_COMPRESSION )
                    {
                        // Explicit level implies the default zlib strategy.
                        compression_strategy = IMWRITE_PNG_STRATEGY_DEFAULT; // Default strategy
                        compression_level = params[i+1];
                        compression_level = MIN(MAX(compression_level, 0), Z_BEST_COMPRESSION);
                    }
                    if( params[i] == IMWRITE_PNG_STRATEGY )
                    {
                        compression_strategy = params[i+1];
                        compression_strategy = MIN(MAX(compression_strategy, 0), Z_FIXED);
                    }
                    if( params[i] == IMWRITE_PNG_BILEVEL )
                    {
                        isBilevel = params[i+1] != 0;
                    }
                }

                if( m_buf || f )
                {
                    if( compression_level >= 0 )
                    {
                        // User-requested zlib level (0..9).
                        png_set_compression_level( png_ptr, compression_level );
                    }
                    else
                    {
                        // tune parameters for speed
                        // (see http://wiki.linuxquestions.org/wiki/Libpng)
                        png_set_filter(png_ptr, PNG_FILTER_TYPE_BASE, PNG_FILTER_SUB);
                        png_set_compression_level(png_ptr, Z_BEST_SPEED);
                    }
                    png_set_compression_strategy(png_ptr, compression_strategy);

                    // Bit depth: 1 for bilevel output, otherwise 8 or 16.
                    png_set_IHDR( png_ptr, info_ptr, width, height, depth == CV_8U ? isBilevel?1:8 : 16,
                        channels == 1 ? PNG_COLOR_TYPE_GRAY :
                        channels == 3 ? PNG_COLOR_TYPE_RGB : PNG_COLOR_TYPE_RGBA,
                        PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_DEFAULT,
                        PNG_FILTER_TYPE_DEFAULT );

                    png_write_info( png_ptr, info_ptr );

                    // Pack 1-bit samples into bytes for bilevel images.
                    if (isBilevel)
                        png_set_packing(png_ptr);

                    // cv::Mat stores BGR(A); swap to RGB order, and
                    // byte-swap 16-bit samples on little-endian hosts.
                    png_set_bgr( png_ptr );
                    if( !isBigEndian() )
                        png_set_swap( png_ptr );

                    // Row-pointer table expected by png_write_image.
                    buffer.allocate(height);
                    for( y = 0; y < height; y++ )
                        buffer[y] = img.data + y*img.step;

                    png_write_image( png_ptr, buffer );
                    png_write_end( png_ptr, info_ptr );

                    result = true;
                }
            }
        }
    }

    // Common cleanup for both success and longjmp'd error paths.
    png_destroy_write_struct( &png_ptr, &info_ptr );
    if(f) fclose( (FILE*)f );

    return result;
}
Example #3
0
// Extracts feature maps from a trained Caffe network for one input image
// and writes each feature channel to disk as an indexed PNG.
// Returns 0 on success, -1 when the image cannot be loaded or preprocessed.
int main(int argc, char **argv) {
  InputData input_params(argc,argv);

  // --- Init Caffe net (GPU or CPU, test phase) ---
  LOG(INFO) << "Init caffe...";
  if (input_params.gpu_en) {
    Caffe::set_mode(Caffe::GPU);
    Caffe::SetDevice(input_params.gpu_id);
  } else {
    Caffe::set_mode(Caffe::CPU);
  }
  Caffe::set_phase(Caffe::TEST);
  Net<float> caffe_net(input_params.protoName);
  caffe_net.CopyTrainedLayersFrom(input_params.modelName);

  if (::strcmp(input_params.blobs[0].c_str(),"all")==0) {  // extract all layers
    input_params.blobs = caffe_net.blob_names();
    input_params.wrPrefix = caffe_net.blob_names();
    // '/' cannot appear in path, and we replace it with '_'
    for (size_t i = 0; i < input_params.wrPrefix.size(); ++i) {
      for (size_t j = 0; j < input_params.wrPrefix[i].length(); ++j) {
        if (input_params.wrPrefix[i][j] == '/') {
          input_params.wrPrefix[i][j] = '_';
        }
      }
    }
  }
  CHECK_EQ(input_params.wrPrefix.size(),input_params.blobs.size())
    << "wr_prefix.size() != blobs.size()";

  // --- Check that every requested blob exists in the net ---
  const int feat_nums = static_cast<int>(input_params.blobs.size());
  CHECK_GT(feat_nums,0)
    << "No blob names specified in command line.";
  for (int i = 0; i < feat_nums; i++) {
    CHECK(caffe_net.has_blob(input_params.blobs[i]))
      << "Unknown feature blob name " << input_params.blobs[i]
      << " in the network " << input_params.protoName;
  }

  // --- Load and preprocess the input image ---
  LOG(INFO) << "Prepare image data...";
  const vector<Blob<float>* >& input_blobs = caffe_net.input_blobs();
  const int channels = input_blobs[0]->channels();
  const int resize_height = input_blobs[0]->height();
  const int resize_width  = input_blobs[0]->width();
  const int data_count = input_blobs[0]->count();
  Mat img = cv::imread(input_params.imageName.c_str(),1);
  if (!img.data) {
    LOG(ERROR) << "Cannot open image.";
    return -1;
  }
  CHECK_EQ(img.channels(),channels)
    << "Input image channel dismatch the Net.";

  Mat resized_img;
  // cv::Size takes (width, height); the original call passed them swapped,
  // which distorted any non-square network input.
  cv::resize(img, resized_img, cv::Size(resize_width, resize_height));
  // RAII buffer instead of new[]/delete[]: no leak on the error return below.
  std::vector<float> input_data(data_count);
  if (-1 == prepare_image(resized_img, input_data.data(), data_count)) {
    return -1;
  }
  const float* data_ptr = input_data.data();

  // --- Copy the preprocessed data into the network's input blob ---
  switch (Caffe::mode()) {
  case Caffe::GPU:
    caffe::caffe_copy(data_count,data_ptr,input_blobs[0]->mutable_gpu_data());
    break;
  case Caffe::CPU:
    caffe::caffe_copy(data_count,data_ptr,input_blobs[0]->mutable_cpu_data());
    break;
  default:
    LOG(ERROR) << "Unknow caffe mode";
  }

  // --- Forward pass and per-blob feature extraction ---
  LOG(INFO) << "Extracting features begin (total layers " << feat_nums << ")";
  caffe_net.ForwardPrefilled();  // Forward
  for (int i=0; i<feat_nums; ++i) {
    const shared_ptr<Blob<float> > feature_blob = caffe_net.blob_by_name(input_params.blobs[i]);

    LOG(INFO) << "Extracting " << input_params.blobs[i] << " ("
      << feature_blob->channels()*feature_blob->num() << " Mat)";

    vector<Mat> mat_feature;
    blob2mat(feature_blob, mat_feature);
    // Save each feature map to disk as <wrRoot><wrPrefix>_NNNN.png.
    for (int k = 0; k < static_cast<int>(mat_feature.size()); ++k) {
      char sn[128];
      // snprintf: long wrRoot/wrPrefix strings cannot overflow sn.
      snprintf(sn, sizeof(sn), "%s%s_%.4d.png",
               input_params.wrRoot.c_str(), input_params.wrPrefix[i].c_str(), k);
      imwrite(sn, mat_feature[k]);
    }
  }
  LOG(INFO) << "Extracting features end";
  return 0;
}
// Convenience overload: sizes the output vector to one Mat per channel
// (zero for an empty input) and delegates to the array-based split().
void cv::split(const Mat& m, vector<Mat>& mv)
{
    const int nplanes = m.empty() ? 0 : m.channels();
    mv.resize(nplanes);
    if( nplanes > 0 )
        split(m, &mv[0]);
}
Example #5
0
void guiAlphaBlend(const Mat& src1, const Mat& src2)
{
	showMatInfo(src1,"src1");
	cout<<endl;
	showMatInfo(src2,"src2");

	double minv,maxv;
	minMaxLoc(src1, &minv, &maxv);
	bool isNormirized = (maxv<=1.0 &&minv>=0.0) ? true:false;
	Mat s1,s2;

	if(src1.depth()==CV_8U || src1.depth()==CV_32F)
	{
		if(src1.channels()==1)cvtColor(src1,s1,CV_GRAY2BGR);
		else s1 = src1;
		if(src2.channels()==1)cvtColor(src2,s2,CV_GRAY2BGR);
		else s2 = src2;
	}
	else
	{
		Mat ss1,ss2;
		src1.convertTo(ss1,CV_32F);
		src2.convertTo(ss2,CV_32F);

		if(src1.channels()==1)cvtColor(ss1,s1,CV_GRAY2BGR);
		else s1 = ss1.clone();
		if(src2.channels()==1)cvtColor(ss2,s2,CV_GRAY2BGR);
		else s2 = ss2.clone();
	}
	namedWindow("alphaBlend");
	int a = 0;
	createTrackbar("a","alphaBlend",&a,100);
	int key = 0;
	Mat show;
	while(key!='q')
	{	
		addWeighted(s1,1.0-a/100.0,s2,a/100.0,0.0,show);

		if(show.depth()==CV_8U)
		{
			imshow("alphaBlend",show);
		}
		else
		{
			if(isNormirized)
			{
				imshow("alphaBlend",show);
			}
			else
			{
				minMaxLoc(show, &minv, &maxv);

				Mat s;
				if(maxv<=255)
					show.convertTo(s,CV_8U);
				else
					show.convertTo(s,CV_8U,255/maxv);

				imshow("alphaBlend",s);
			}
		}
		key = waitKey(1);
		if(key=='f')
		{
			a = (a > 0) ? 0 : 100;
			setTrackbarPos("a","alphaBlend",a);
		}
	}
	destroyWindow("alphaBlend");
}
Example #6
0
//knock point detection in HSV space, using Hue and Saturation
PTS32 _getKnockMask(Mat& srcImg, Mat& dstImg,PTSysEnum&eBoard)
{
    PTDEBUG("Enter %s\n", __FUNCTION__);
	//select the right desktop corner threshold according to Ipad
	int RightDesktopConerthreshold = 0;
	switch(eBoard) {
      case PT_APPLE_IPAD2:
      case PT_APPLE_IPAD3:
      case PT_APPLE_IPAD4: {
           RightDesktopConerthreshold = 30; 
           break;
           }
      case PT_APPLE_MINI1: {
           RightDesktopConerthreshold = 40; 
           break;
           }
      case PT_APPLE_MINI2: {
           RightDesktopConerthreshold = 40; 
           break;
           }
      case PT_APPLE_MINI3: {
           RightDesktopConerthreshold = 30; 
           break;
           }
      case PT_APPLE_AIR  : {
           RightDesktopConerthreshold = 50;
           break;
           }
      case PT_APPLE_AIR2 : {
           RightDesktopConerthreshold = 50;
           break;
           }
      default: {
            RightDesktopConerthreshold = 30; 
           break;
      }
    }

    Mat temp(srcImg.size(), CV_8UC3);
    cvtColor(srcImg, temp, CV_RGB2HSV);

    vector<Mat> hsv;
    split(temp, hsv);
    Mat hueImg = hsv[0];
    Mat satImg = hsv[1];

    const int hueMin = KNOCKRANGE[0][0];
    const int hueMax = KNOCKRANGE[0][1];
    const int satMin = KNOCKRANGE[0][2];
    const int satMax = KNOCKRANGE[0][3];

    const int hueMin_ = KNOCKRANGE[1][0];
    const int hueMax_ = KNOCKRANGE[1][1];
    const int satMin_ = KNOCKRANGE[1][2];
    const int satMax_ = KNOCKRANGE[1][3];

    PTDEBUG("hueMin[%d], hueMax[%d], satMin[%d], satMax[%d]\n", hueMin, hueMax, satMin, satMax);

    const int rows = hueImg.rows;
    const int cols = hueImg.cols;
    const int channels = hueImg.channels();

    for(int i = 0; i < rows; i++) {
        const uchar* pHue = hueImg.ptr<uchar>(i);
        const uchar* pSat = satImg.ptr<uchar>(i);
        uchar* pDst = dstImg.ptr<uchar>(i);
        for(int j = 0; j < cols; j += channels) {
            const PTU8 hue = pHue[j];
            const PTU8 sat = pSat[j];
            if((cols - j + i > RightDesktopConerthreshold/*exclude top right desktop corner*/)
            && ((hueMin<=hue && hue<=hueMax && satMin<=sat && sat<=satMax) || (hueMin_<=hue && hue<=hueMax_ && satMin_<=sat && sat<=satMax_))) {
                pDst[j] = 255;
            } else {
                pDst[j] = 0;
            }
        }
    }

    //dilate(dstImg, dstImg, Mat());
    dilate(dstImg, dstImg, cv::Mat(), cv::Point(-1, -1), 1);
    PTDEBUG("Exit %s\n", __FUNCTION__);

    return PT_RET_OK;
}
Example #7
0
bool  PngEncoder::write( const Mat& img, const Vector<int>& params )
{
    int compression_level = 0;

    for( size_t i = 0; i < params.size(); i += 2 )
    {
        if( params[i] == CV_IMWRITE_PNG_COMPRESSION )
        {
            compression_level = params[i+1];
            compression_level = MIN(MAX(compression_level, 0), MAX_MEM_LEVEL);
        }
    }

    png_structp png_ptr = png_create_write_struct( PNG_LIBPNG_VER_STRING, 0, 0, 0 );
    png_infop info_ptr = 0;
    FILE* f = 0;
    int y, width = img.cols, height = img.rows;
    int depth = img.depth(), channels = img.channels();
    bool result = false;
    AutoBuffer<uchar*> buffer;

    if( depth != CV_8U && depth != CV_16U )
        return false;

    if( png_ptr )
    {
        info_ptr = png_create_info_struct( png_ptr );

        if( info_ptr )
        {
            if( setjmp( png_ptr->jmpbuf ) == 0 )
            {
                if( m_buf )
                {
                    png_set_write_fn(png_ptr, this,
                        (png_rw_ptr)writeDataToBuf, (png_flush_ptr)flushBuf);
                }
                else
                {
                    f = fopen( m_filename.c_str(), "wb" );
                    if( f )
                        png_init_io( png_ptr, f );
                }

                if( m_buf || f )
                {
                    if( compression_level > 0 )
                    {
                        png_set_compression_mem_level( png_ptr, compression_level );
                    }
                    else
                    {
                        // tune parameters for speed
                        // (see http://wiki.linuxquestions.org/wiki/Libpng)
                        png_set_filter(png_ptr, PNG_FILTER_TYPE_BASE, PNG_FILTER_SUB);
                        png_set_compression_level(png_ptr, Z_BEST_SPEED);
                    }
                    png_set_compression_strategy(png_ptr, Z_HUFFMAN_ONLY);

                    png_set_IHDR( png_ptr, info_ptr, width, height, depth == CV_8U ? 8 : 16,
                        channels == 1 ? PNG_COLOR_TYPE_GRAY :
                        channels == 3 ? PNG_COLOR_TYPE_RGB : PNG_COLOR_TYPE_RGBA,
                        PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_DEFAULT,
                        PNG_FILTER_TYPE_DEFAULT );

                    png_write_info( png_ptr, info_ptr );

                    png_set_bgr( png_ptr );
                    if( !isBigEndian() )
                        png_set_swap( png_ptr );

                    buffer.allocate(height);
                    for( y = 0; y < height; y++ )
                        buffer[y] = img.data + y*img.step;

                    png_write_image( png_ptr, buffer );
                    png_write_end( png_ptr, info_ptr );

                    result = true;
                }
            }
        }
    }

    png_destroy_write_struct( &png_ptr, &info_ptr );
    if(f) fclose( f );

    return result;
}
Example #8
0
 // Streams the matrix with space separators via writeMat(); the final
 // flag requests single-line mode when the matrix holds exactly one
 // single-channel row. Multi-row output gets a trailing newline.
 void write(std::ostream& out, const Mat& m, const int*, int) const
 {
     const bool singleLine = (m.rows * m.channels() == 1);
     writeMat(out, m, ' ', ' ', singleLine);
     if (m.rows > 1)
         out << "\n";
 }
int main(int argc, char** argv)
{

  // KNN neighbours
  int k=1;
  // KdTree with 5 random trees
  flann::KDTreeIndexParams indexParams(5);

  const char *scripcrs[] = {"Lat", "Chi", "Kan", "Kor"};
  vector<string> scripts(scripcrs,scripcrs+4);

  cout << "Load data and Create the Index for Class 1... "; cout.flush();
  //Load data for Latin script
  Ptr<ml::TrainData> data_Latin = ml::TrainData::loadFromCSV(
                                    "Latin_features.csv",0,0);
  Mat feats_Latin = data_Latin->getTrainSamples();
  //cout << "Latin: Loaded " << feats_Latin.rows << " samples, " << feats_Latin.cols 
  //                                  << "-D"<<endl;
  // Create the Index
  flann::Index kdtree_Latin(feats_Latin, indexParams);
  // Save the index
  //kdtree_Latin.save("train/trained_kdtree_index_Latin.fln");
  cout << "done!" << endl; cout.flush();


  cout << "Load data and Create the Index for Class 2... "; cout.flush();
  //Load data for Chinese script
  Ptr<ml::TrainData> data_Chinese = ml::TrainData::loadFromCSV(
                                   "Chinese_features.csv",0,0);
  Mat feats_Chinese = data_Chinese->getTrainSamples();
  //cout << "Chinese: Loaded " << feats_Chinese.rows << " samples, " << feats_Chinese.cols 
  //                                 << "-D"<<endl;
  // Create the Index
  flann::Index kdtree_Chinese(feats_Chinese, indexParams);
  // Save the index
  //kdtree_Chinese.save("train/trained_kdtree_index_Chinese.fln");
  cout << "done!" << endl; cout.flush();


  cout << "Load data and Create the Index for Class 3... "; cout.flush();
  //Load data for Kannada script
  Ptr<ml::TrainData> data_Kannada = ml::TrainData::loadFromCSV(
                                    "Kannada_features.csv",0,0);
  Mat feats_Kannada = data_Kannada->getTrainSamples();
  //cout << "Kannada: Loaded " << feats_Kannada.rows << " samples, " << feats_Kannada.cols 
  //                                  << "-D"<<endl;
  // Create the Index
  flann::Index kdtree_Kannada(feats_Kannada, indexParams);
  // Save the index
  //kdtree_Kannada.save("train/trained_kdtree_index_Kannada.fln");
  cout << "done!" << endl; cout.flush();


  cout << "Load data and Create the Index for Class 4... "; cout.flush();
  //Load data for Korean script
  Ptr<ml::TrainData> data_Korean = ml::TrainData::loadFromCSV(
                                   "Korean_features.csv",0,0);
  Mat feats_Korean = data_Korean->getTrainSamples();
  //cout << "Korean: Loaded " << feats_Korean.rows << " samples, " << feats_Korean.cols 
  //                                 << "-D"<<endl;
  // Create the Index
  flann::Index kdtree_Korean(feats_Korean, indexParams);
  // Save the index
  //kdtree_Korean.save("train/trained_kdtree_index_Korean.fln");
  cout << "done!" << endl; cout.flush();



  /////////////////////////////////////////////////////////////////////////////
  // Compute weights for each feature on each class
  Mat indices_1;
  Mat dists_1_2;
  Mat dists_1_3;
  Mat dists_1_4;

  kdtree_Chinese.knnSearch(feats_Latin, indices_1, dists_1_2, 10, flann::SearchParams(64));
  kdtree_Kannada.knnSearch(feats_Latin, indices_1, dists_1_3, 10, flann::SearchParams(64));
  kdtree_Korean.knnSearch(feats_Latin, indices_1, dists_1_4, 10, flann::SearchParams(64));

  reduce(dists_1_2, dists_1_2, -1, CV_REDUCE_AVG);
  reduce(dists_1_3, dists_1_3, -1, CV_REDUCE_AVG);
  reduce(dists_1_4, dists_1_4, -1, CV_REDUCE_AVG);

  Mat weights_Latin;
  add(dists_1_2,dists_1_3,weights_Latin);
  add(dists_1_4,weights_Latin,weights_Latin);
  weights_Latin = weights_Latin/3;

  kdtree_Latin.knnSearch(feats_Chinese, indices_1, dists_1_2, 10, flann::SearchParams(64));
  kdtree_Kannada.knnSearch(feats_Chinese, indices_1, dists_1_3, 10, flann::SearchParams(64));
  kdtree_Korean.knnSearch(feats_Chinese, indices_1, dists_1_4, 10, flann::SearchParams(64));

  reduce(dists_1_2, dists_1_2, -1, CV_REDUCE_AVG);
  reduce(dists_1_3, dists_1_3, -1, CV_REDUCE_AVG);
  reduce(dists_1_4, dists_1_4, -1, CV_REDUCE_AVG);

  Mat weights_Chinese;
  add(dists_1_2,dists_1_3,weights_Chinese);
  add(dists_1_4,weights_Chinese,weights_Chinese);
  weights_Chinese = weights_Chinese/3;

  kdtree_Chinese.knnSearch(feats_Kannada, indices_1, dists_1_2, 10, flann::SearchParams(64));
  kdtree_Latin.knnSearch(feats_Kannada, indices_1, dists_1_3, 10, flann::SearchParams(64));
  kdtree_Korean.knnSearch(feats_Kannada, indices_1, dists_1_4, 10, flann::SearchParams(64));

  reduce(dists_1_2, dists_1_2, -1, CV_REDUCE_AVG);
  reduce(dists_1_3, dists_1_3, -1, CV_REDUCE_AVG);
  reduce(dists_1_4, dists_1_4, -1, CV_REDUCE_AVG);

  Mat weights_Kannada;
  add(dists_1_2,dists_1_3,weights_Kannada);
  add(dists_1_4,weights_Kannada,weights_Kannada);
  weights_Kannada = weights_Kannada/3;

  kdtree_Chinese.knnSearch(feats_Korean, indices_1, dists_1_2, 10, flann::SearchParams(64));
  kdtree_Kannada.knnSearch(feats_Korean, indices_1, dists_1_3, 10, flann::SearchParams(64));
  kdtree_Latin.knnSearch(feats_Korean, indices_1, dists_1_4, 10, flann::SearchParams(64));

  reduce(dists_1_2, dists_1_2, -1, CV_REDUCE_AVG);
  reduce(dists_1_3, dists_1_3, -1, CV_REDUCE_AVG);
  reduce(dists_1_4, dists_1_4, -1, CV_REDUCE_AVG);

  Mat weights_Korean;
  add(dists_1_2,dists_1_3,weights_Korean);
  add(dists_1_4,weights_Korean,weights_Korean);
  weights_Korean = weights_Korean/3;

  double min_dist, max_dist, minVal, maxVal;
  minMaxLoc(weights_Latin, &minVal, &maxVal);
  min_dist = minVal; max_dist = maxVal;
  minMaxLoc(weights_Chinese, &minVal, &maxVal);
  min_dist = min(min_dist,minVal); max_dist = max(max_dist,maxVal);
  minMaxLoc(weights_Kannada, &minVal, &maxVal);
  min_dist = min(min_dist,minVal); max_dist = max(max_dist,maxVal);
  minMaxLoc(weights_Korean, &minVal, &maxVal);
  min_dist = min(min_dist,minVal); max_dist = max(max_dist,maxVal);

  weights_Latin = (weights_Latin - min_dist) / (max_dist - min_dist);
  weights_Chinese = (weights_Chinese - min_dist) / (max_dist - min_dist);
  weights_Kannada = (weights_Kannada - min_dist) / (max_dist - min_dist);
  weights_Korean = (weights_Korean - min_dist) / (max_dist - min_dist);

  /////////////////////////////////////////////////////////////////////////////
  /////////////////////////////////////////////////////////////////////////////


  //If we have image(s) passed as an argument do to classification using NBNN
  if (argc>2)
  {
    //First extract features
    //Load filters bank and withenning params
    Mat filters, M, P;
    FileStorage fs("first_layer_filters.xml", FileStorage::READ);
    fs["D"] >> filters;
    fs["M"] >> M;
    fs["P"] >> P;
    fs.release();
  
    int src_height  = 64;
    int image_size  = 32;
    int quad_size   = 12;
    int patch_size  = 8;
    int num_quads   = 25; //extract 25 quads (12x12) from each image
    int num_tiles   = 25; //extract 25 patches (8x8) from each quad 
  
    double alpha    = 0.5; //used for feature representation: 
                           //scalar non-linear function z = max(0, |D*a| - alpha)
  
    Mat quad;
    Mat tmp;

    ofstream outfile;
    outfile.open (argv[argc-1]);

    for (int f=1; f<argc-1; f++)
    {
      cout << "Extracting features for image " << argv[f] << " ... "; cout.flush();
      Mat src  = imread(argv[f]);
      if(src.channels() != 3)
        return 0;
      cvtColor(src,src,COLOR_RGB2GRAY);
      int src_width = (src.cols*src_height)/src.rows;
      resize(src,src,Size(src_width,src_height));
  
      Mat query = Mat::zeros(0,1737,CV_64FC1);
  
      // Do sliding window from x=0 to src_width-image_size in three rows (top,middle,bottom)
      for (int y=0; y<=src_height-image_size; y=y+8)
      { 
        for (int x=0; x<=src_width-image_size; x=x+8)
        { 
  
          Mat img;
          src(Rect(x,y,image_size,image_size)).copyTo(img); // img must be 32x32 pixels
  
          vector< vector<double> > data_pool(9); 
          int quad_id = 1;
          for (int q_x=0; q_x<=image_size-quad_size; q_x=q_x+(quad_size/2-1))
          {
            for (int q_y=0; q_y<=image_size-quad_size; q_y=q_y+(quad_size/2-1))
            {
              Rect quad_rect = Rect(q_x,q_y,quad_size,quad_size); 
              img(quad_rect).copyTo(quad);
      
              //start sliding window (8x8) in each tile and store the patch as row in data_pool
              for (int w_x=0; w_x<=quad_size-patch_size; w_x++)
              {
                for (int w_y=0; w_y<=quad_size-patch_size; w_y++)
                {
                  quad(Rect(w_x,w_y,patch_size,patch_size)).copyTo(tmp);
                  tmp = tmp.reshape(0,1);
                  tmp.convertTo(tmp, CV_64F);
                  normalizeAndZCA(tmp,M,P);
                  vector<double> patch;
                  tmp.copyTo(patch);
                  if ((quad_id == 1)||(quad_id == 2)||(quad_id == 6)||(quad_id == 7))
                    data_pool[0].insert(data_pool[0].end(),patch.begin(),patch.end());
                  if ((quad_id == 2)||(quad_id == 7)||(quad_id == 3)||(quad_id == 8)||(quad_id == 4)||(quad_id == 9))
                    data_pool[1].insert(data_pool[1].end(),patch.begin(),patch.end());
                  if ((quad_id == 4)||(quad_id == 9)||(quad_id == 5)||(quad_id == 10))
                    data_pool[2].insert(data_pool[2].end(),patch.begin(),patch.end());
                  if ((quad_id == 6)||(quad_id == 11)||(quad_id == 16)||(quad_id == 7)||(quad_id == 12)||(quad_id == 17))
                    data_pool[3].insert(data_pool[3].end(),patch.begin(),patch.end());
                  if ((quad_id == 7)||(quad_id == 12)||(quad_id == 17)||(quad_id == 8)||(quad_id == 13)||(quad_id == 18)||(quad_id == 9)||(quad_id == 14)||(quad_id == 19))
                    data_pool[4].insert(data_pool[4].end(),patch.begin(),patch.end());
                  if ((quad_id == 9)||(quad_id == 14)||(quad_id == 19)||(quad_id == 10)||(quad_id == 15)||(quad_id == 20))
                    data_pool[5].insert(data_pool[5].end(),patch.begin(),patch.end());
                  if ((quad_id == 16)||(quad_id == 21)||(quad_id == 17)||(quad_id == 22))
                    data_pool[6].insert(data_pool[6].end(),patch.begin(),patch.end());
                  if ((quad_id == 17)||(quad_id == 22)||(quad_id == 18)||(quad_id == 23)||(quad_id == 19)||(quad_id == 24))
                    data_pool[7].insert(data_pool[7].end(),patch.begin(),patch.end());
                  if ((quad_id == 19)||(quad_id == 24)||(quad_id == 20)||(quad_id == 25))
                    data_pool[8].insert(data_pool[8].end(),patch.begin(),patch.end());
                }
              }
      
              quad_id++;
            }
          }
      
          //do dot product of each normalized and whitened patch 
          //each pool is averaged and this yields a representation of 9xD 
          Mat feature = Mat::zeros(9,filters.rows,CV_64FC1);
          for (int i=0; i<9; i++)
          {
            Mat pool = Mat(data_pool[i]);
            pool = pool.reshape(0,data_pool[i].size()/filters.cols);
            for (int p=0; p<pool.rows; p++)
            {
              for (int f=0; f<filters.rows; f++)
              {
                feature.row(i).at<double>(0,f) = feature.row(i).at<double>(0,f) + max(0.0,std::abs(pool.row(p).dot(filters.row(f)))-alpha);
              }
            }
          }
          feature = feature.reshape(0,1);
          query.push_back(feature);
  
        }
      }
      //cout << "Extracted " << query.rows << " samples, " << query.cols << "-D"<<endl;
   
      query.convertTo(query, feats_Latin.type());
      //cout << feats_Latin.type() << endl;
      //cout << query.type() << endl;
   

      vector<double> I2Cdistances(4,0);
   
      // Batch: Call knnSearch
      Mat indices;
      Mat dists;
     
      kdtree_Latin.knnSearch(query, indices, dists, k, flann::SearchParams(64));
      for(int row = 0 ; row < indices.rows ; row++)
        for(int col = 0 ; col < indices.cols ; col++)
          I2Cdistances[0] += dists.at<float>(row,col) * 
                             (1 - weights_Latin.at<float>(indices.at<float>(row,col),0));
      //cout << "Image To Class (Latin) Distance:: "<< I2Cdistances[0] << endl;
     
      kdtree_Chinese.knnSearch(query, indices, dists, k, flann::SearchParams(64));
      for(int row = 0 ; row < indices.rows ; row++)
        for(int col = 0 ; col < indices.cols ; col++)
          I2Cdistances[1] += dists.at<float>(row,col) *
                             (1 - weights_Chinese.at<float>(indices.at<float>(row,col),0));
      //cout << "Image To Class (Chinese) Distance:: "<< I2Cdistances[1] << endl;
     
      kdtree_Kannada.knnSearch(query, indices, dists, k, flann::SearchParams(64));
      for(int row = 0 ; row < indices.rows ; row++)
        for(int col = 0 ; col < indices.cols ; col++)
          I2Cdistances[2] += dists.at<float>(row,col) *
                             (1 - weights_Kannada.at<float>(indices.at<float>(row,col),0));
      //cout << "Image To Class (Kannada) Distance:: "<< I2Cdistances[2] << endl;
     
      kdtree_Korean.knnSearch(query, indices, dists, k, flann::SearchParams(64));
      for(int row = 0 ; row < indices.rows ; row++)
        for(int col = 0 ; col < indices.cols ; col++)
          I2Cdistances[3] += dists.at<float>(row,col) * 
                             (1 - weights_Korean.at<float>(indices.at<float>(row,col),0));
      //cout << "Image To Class (Korean) Distance:: "<< I2Cdistances[3] << endl;
     

      //Classify image
      double minVal,maxVal;
      Point minLoc,maxLoc;
      minMaxLoc(I2Cdistances, &minVal, &maxVal, &minLoc, &maxLoc);
      //cout << minLoc << endl;
      //cout << "Predicted Script: " << scripts[minLoc.x] << endl;
      //cout << minLoc.x << endl;
      //outfile << argv[f] << "|" << scripts[minLoc.x] << endl;
      outfile << argv[f] << " " << minLoc.x << endl;
     

      cout << "done!" << endl; cout.flush();
    } // end foreach input image

  } //fi if argc>1
Example #10
0
void meanStdDev(const Mat& m, Scalar& mean, Scalar& stddev, const Mat& mask) {
    static MeanStdDevFunc tab[] = {
        meanStdDev_<SqrC1<uchar, double> >, 0,
        meanStdDev_<SqrC1<ushort, double> >,
        meanStdDev_<SqrC1<short, double> >,
        meanStdDev_<SqrC1<int, double> >,
        meanStdDev_<SqrC1<float, double> >,
        meanStdDev_<SqrC1<double, double> >, 0,

        meanStdDev_<SqrC2<uchar, double> >, 0,
        meanStdDev_<SqrC2<ushort, double> >,
        meanStdDev_<SqrC2<short, double> >,
        meanStdDev_<SqrC2<int, double> >,
        meanStdDev_<SqrC2<float, double> >,
        meanStdDev_<SqrC2<double, double> >, 0,

        meanStdDev_<SqrC3<uchar, double> >, 0,
        meanStdDev_<SqrC3<ushort, double> >,
        meanStdDev_<SqrC3<short, double> >,
        meanStdDev_<SqrC3<int, double> >,
        meanStdDev_<SqrC3<float, double> >,
        meanStdDev_<SqrC3<double, double> >, 0,

        meanStdDev_<SqrC4<uchar, double> >, 0,
        meanStdDev_<SqrC4<ushort, double> >,
        meanStdDev_<SqrC4<short, double> >,
        meanStdDev_<SqrC4<int, double> >,
        meanStdDev_<SqrC4<float, double> >,
        meanStdDev_<SqrC4<double, double> >, 0
    };

    static MeanStdDevMaskFunc mtab[] = {
        meanStdDevMask_<SqrC1<uchar, double> >, 0,
        meanStdDevMask_<SqrC1<ushort, double> >,
        meanStdDevMask_<SqrC1<short, double> >,
        meanStdDevMask_<SqrC1<int, double> >,
        meanStdDevMask_<SqrC1<float, double> >,
        meanStdDevMask_<SqrC1<double, double> >, 0,

        meanStdDevMask_<SqrC2<uchar, double> >, 0,
        meanStdDevMask_<SqrC2<ushort, double> >,
        meanStdDevMask_<SqrC2<short, double> >,
        meanStdDevMask_<SqrC2<int, double> >,
        meanStdDevMask_<SqrC2<float, double> >,
        meanStdDevMask_<SqrC2<double, double> >, 0,

        meanStdDevMask_<SqrC3<uchar, double> >, 0,
        meanStdDevMask_<SqrC3<ushort, double> >,
        meanStdDevMask_<SqrC3<short, double> >,
        meanStdDevMask_<SqrC3<int, double> >,
        meanStdDevMask_<SqrC3<float, double> >,
        meanStdDevMask_<SqrC3<double, double> >, 0,

        meanStdDevMask_<SqrC4<uchar, double> >, 0,
        meanStdDevMask_<SqrC4<ushort, double> >,
        meanStdDevMask_<SqrC4<short, double> >,
        meanStdDevMask_<SqrC4<int, double> >,
        meanStdDevMask_<SqrC4<float, double> >,
        meanStdDevMask_<SqrC4<double, double> >, 0
    };

    CV_Assert(m.channels() <= 4);

    if (!mask.data) {
        MeanStdDevFunc func = tab[m.type()];
        CV_Assert(func != 0);
        func(m, mean, stddev);
    } else {
        MeanStdDevMaskFunc func = mtab[m.type()];
        CV_Assert(mask.size() == m.size() && mask.type() == CV_8U && func != 0);
        func(m, mask, mean, stddev);
    }
}
Example #11
0
 // Streams the whole matrix wrapped in a single pair of brackets, delegating
 // the element/row output to writeMat().
 void write(std::ostream& out, const Mat& m, const int*, int) const
 {
     const char rowBrace = m.cols > 1 ? '[' : ' ';
     const bool isSingleElement = m.rows*m.channels() == 1;
     out << '[';
     writeMat(out, m, rowBrace, '[', isSingleElement);
     out << ']';
 }
Example #12
0
// Cross-correlates img with templ and writes the result into corr, using the
// FFT overlap-add technique: the template spectrum is computed once per
// channel, then the image is processed tile by tile — each tile is DFT'ed,
// multiplied by the template spectrum, and inverse-DFT'ed.
//
//  img        source image
//  _templ     template; promoted to max(CV_32F, img depth) if depths differ
//  corr       output correlation map; (re)created as corrsize/ctype
//  corrsize   requested output size (must fit within img + templ - 1)
//  ctype      output type; multi-channel output requires delta == 0
//  anchor     template anchor applied when positioning source tiles
//  delta      offset added during the final conversion into corr
//  borderType border mode for tiles that extend past the (non-isolated) ROI
void crossCorr( const Mat& img, const Mat& _templ, Mat& corr,
                Size corrsize, int ctype,
                Point anchor, double delta, int borderType )
{
    const double blockScale = 4.5;  // tile size ~ blockScale x template size
    const int minBlockSize = 256;   // floor keeps tiling efficient for tiny templates
    std::vector<uchar> buf;         // scratch for channel extraction / depth conversion

    Mat templ = _templ;
    int depth = img.depth(), cn = img.channels();
    int tdepth = templ.depth(), tcn = templ.channels();
    int cdepth = CV_MAT_DEPTH(ctype), ccn = CV_MAT_CN(ctype);

    CV_Assert( img.dims <= 2 && templ.dims <= 2 && corr.dims <= 2 );

    // If the template depth differs from the image, convert it to a floating
    // depth wide enough for both so the spectra can be multiplied.
    if( depth != tdepth && tdepth != std::max(CV_32F, depth) )
    {
        _templ.convertTo(templ, std::max(CV_32F, depth));
        tdepth = templ.depth();
    }

    CV_Assert( depth == tdepth || tdepth == CV_32F);
    CV_Assert( corrsize.height <= img.rows + templ.rows - 1 &&
               corrsize.width <= img.cols + templ.cols - 1 );

    // delta is only applied in the single-channel output path below.
    CV_Assert( ccn == 1 || delta == 0 );

    corr.create(corrsize, ctype);

    // Working depth for the DFT buffers: CV_64F for integer inputs wider than
    // 8 bits, otherwise the widest of CV_32F, template depth and output depth.
    int maxDepth = depth > CV_8S ? CV_64F : std::max(std::max(CV_32F, tdepth), cdepth);
    Size blocksize, dftsize;

    // Initial tile size: blockScale times the template, at least minBlockSize,
    // and never larger than the output itself.
    blocksize.width = cvRound(templ.cols*blockScale);
    blocksize.width = std::max( blocksize.width, minBlockSize - templ.cols + 1 );
    blocksize.width = std::min( blocksize.width, corr.cols );
    blocksize.height = cvRound(templ.rows*blockScale);
    blocksize.height = std::max( blocksize.height, minBlockSize - templ.rows + 1 );
    blocksize.height = std::min( blocksize.height, corr.rows );

    // DFT must hold tile + template - 1 so the convolution is linear, not circular.
    dftsize.width = std::max(getOptimalDFTSize(blocksize.width + templ.cols - 1), 2);
    dftsize.height = getOptimalDFTSize(blocksize.height + templ.rows - 1);
    if( dftsize.width <= 0 || dftsize.height <= 0 )
        CV_Error( CV_StsOutOfRange, "the input arrays are too big" );

    // recompute block size
    blocksize.width = dftsize.width - templ.cols + 1;
    blocksize.width = MIN( blocksize.width, corr.cols );
    blocksize.height = dftsize.height - templ.rows + 1;
    blocksize.height = MIN( blocksize.height, corr.rows );

    // One dftsize-sized plane per template channel, stacked vertically.
    Mat dftTempl( dftsize.height*tcn, dftsize.width, maxDepth );
    Mat dftImg( dftsize, maxDepth );

    // Size the scratch buffer for the largest single-channel plane needed by
    // any of the extract/convert steps below.
    int i, k, bufSize = 0;
    if( tcn > 1 && tdepth != maxDepth )
        bufSize = templ.cols*templ.rows*CV_ELEM_SIZE(tdepth);

    if( cn > 1 && depth != maxDepth )
        bufSize = std::max( bufSize, (blocksize.width + templ.cols - 1)*
            (blocksize.height + templ.rows - 1)*CV_ELEM_SIZE(depth));

    if( (ccn > 1 || cn > 1) && cdepth != maxDepth )
        bufSize = std::max( bufSize, blocksize.width*blocksize.height*CV_ELEM_SIZE(cdepth));

    buf.resize(bufSize);

    // compute DFT of each template plane
    for( k = 0; k < tcn; k++ )
    {
        int yofs = k*dftsize.height;
        Mat src = templ;
        Mat dst(dftTempl, Rect(0, yofs, dftsize.width, dftsize.height));
        Mat dst1(dftTempl, Rect(0, yofs, templ.cols, templ.rows));

        if( tcn > 1 )
        {
            // Extract channel k — straight into dst1 when the depth already
            // matches, otherwise via the scratch buffer.
            src = tdepth == maxDepth ? dst1 : Mat(templ.size(), tdepth, &buf[0]);
            int pairs[] = {k, 0};
            mixChannels(&templ, 1, &src, 1, pairs, 1);
        }

        if( dst1.data != src.data )
            src.convertTo(dst1, dst1.depth());

        // Zero-pad to the right of the template inside the DFT plane.
        if( dst.cols > templ.cols )
        {
            Mat part(dst, Range(0, templ.rows), Range(templ.cols, dst.cols));
            part = Scalar::all(0);
        }
        // Only the first templ.rows rows are non-zero; tell dft() that.
        dft(dst, dst, 0, templ.rows);
    }

    int tileCountX = (corr.cols + blocksize.width - 1)/blocksize.width;
    int tileCountY = (corr.rows + blocksize.height - 1)/blocksize.height;
    int tileCount = tileCountX * tileCountY;

    Size wholeSize = img.size();
    Point roiofs(0,0);
    Mat img0 = img;

    // Unless the caller asked for an isolated ROI, expand img0 to the full
    // parent matrix so border pixels come from real data where available.
    if( !(borderType & BORDER_ISOLATED) )
    {
        img.locateROI(wholeSize, roiofs);
        img0.adjustROI(roiofs.y, wholeSize.height-img.rows-roiofs.y,
                       roiofs.x, wholeSize.width-img.cols-roiofs.x);
    }
    // From here on the (possibly expanded) img0 is treated as the whole image.
    borderType |= BORDER_ISOLATED;

    // calculate correlation by blocks
    for( i = 0; i < tileCount; i++ )
    {
        int x = (i%tileCountX)*blocksize.width;
        int y = (i/tileCountX)*blocksize.height;

        // bsz: output tile size; dsz: source patch required to produce it.
        Size bsz(std::min(blocksize.width, corr.cols - x),
                 std::min(blocksize.height, corr.rows - y));
        Size dsz(bsz.width + templ.cols - 1, bsz.height + templ.rows - 1);
        // Clip the wanted source rectangle [x0, x0+dsz) x [y0, y0+dsz) to img0.
        int x0 = x - anchor.x + roiofs.x, y0 = y - anchor.y + roiofs.y;
        int x1 = std::max(0, x0), y1 = std::max(0, y0);
        int x2 = std::min(img0.cols, x0 + dsz.width);
        int y2 = std::min(img0.rows, y0 + dsz.height);
        Mat src0(img0, Range(y1, y2), Range(x1, x2));
        Mat dst(dftImg, Rect(0, 0, dsz.width, dsz.height));
        Mat dst1(dftImg, Rect(x1-x0, y1-y0, x2-x1, y2-y1));
        Mat cdst(corr, Rect(x, y, bsz.width, bsz.height));

        for( k = 0; k < cn; k++ )
        {
            Mat src = src0;
            dftImg = Scalar::all(0);

            if( cn > 1 )
            {
                // Pull out channel k, via the scratch buffer when a depth
                // conversion is still needed afterwards.
                src = depth == maxDepth ? dst1 : Mat(y2-y1, x2-x1, depth, &buf[0]);
                int pairs[] = {k, 0};
                mixChannels(&src0, 1, &src, 1, pairs, 1);
            }

            if( dst1.data != src.data )
                src.convertTo(dst1, dst1.depth());

            // Synthesize out-of-image border pixels if the patch was clipped.
            if( x2 - x1 < dsz.width || y2 - y1 < dsz.height )
                copyMakeBorder(dst1, dst, y1-y0, dst.rows-dst1.rows-(y1-y0),
                               x1-x0, dst.cols-dst1.cols-(x1-x0), borderType);

            dft( dftImg, dftImg, 0, dsz.height );
            Mat dftTempl1(dftTempl, Rect(0, tcn > 1 ? k*dftsize.height : 0,
                                         dftsize.width, dftsize.height));
            // Correlation = IDFT( image spectrum * conj(template spectrum) ).
            mulSpectrums(dftImg, dftTempl1, dftImg, 0, true);
            dft( dftImg, dftImg, DFT_INVERSE + DFT_SCALE, bsz.height );

            src = dftImg(Rect(0, 0, bsz.width, bsz.height));

            if( ccn > 1 )
            {
                // Multi-channel output: route this channel's result into
                // channel k of the output tile.
                if( cdepth != maxDepth )
                {
                    Mat plane(bsz, cdepth, &buf[0]);
                    src.convertTo(plane, cdepth, 1, delta);
                    src = plane;
                }
                int pairs[] = {0, k};
                mixChannels(&src, 1, &cdst, 1, pairs, 1);
            }
            else
            {
                // Single-channel output: sum the per-channel contributions.
                if( k == 0 )
                    src.convertTo(cdst, cdepth, 1, delta);
                else
                {
                    if( maxDepth != cdepth )
                    {
                        Mat plane(bsz, cdepth, &buf[0]);
                        src.convertTo(plane, cdepth);
                        src = plane;
                    }
                    add(src, cdst, cdst);
                }
            }
        }
    }
}
Example #13
0
// Slides templ over img and writes a CV_32F comparison map to _result
// (size: img - templ + 1).  method selects the formula: CV_TM_SQDIFF,
// CV_TM_CCORR, CV_TM_CCOEFF, each optionally _NORMED.  The raw
// cross-correlation term is computed once via crossCorr(); the remaining
// terms of each formula are recovered per window from integral images.
void cv::matchTemplate( InputArray _img, InputArray _templ, OutputArray _result, int method )
{
    CV_Assert( CV_TM_SQDIFF <= method && method <= CV_TM_CCOEFF_NORMED );

    // numType: 0 = CCORR family, 1 = CCOEFF family, 2 = SQDIFF family.
    int numType = method == CV_TM_CCORR || method == CV_TM_CCORR_NORMED ? 0 :
                  method == CV_TM_CCOEFF || method == CV_TM_CCOEFF_NORMED ? 1 : 2;
    bool isNormed = method == CV_TM_CCORR_NORMED ||
                    method == CV_TM_SQDIFF_NORMED ||
                    method == CV_TM_CCOEFF_NORMED;

    // Accept the two inputs in either order: the smaller acts as the template.
    Mat img = _img.getMat(), templ = _templ.getMat();
    if( img.rows < templ.rows || img.cols < templ.cols )
        std::swap(img, templ);

    CV_Assert( (img.depth() == CV_8U || img.depth() == CV_32F) &&
               img.type() == templ.type() );

    Size corrSize(img.cols - templ.cols + 1, img.rows - templ.rows + 1);
    _result.create(corrSize, CV_32F);
    Mat result = _result.getMat();

    int cn = img.channels();
    crossCorr( img, templ, result, result.size(), result.type(), Point(0,0), 0, 0);

    // Plain cross-correlation needs no post-processing.
    if( method == CV_TM_CCORR )
        return;

    double invArea = 1./((double)templ.rows * templ.cols);

    Mat sum, sqsum;
    Scalar templMean, templSdv;
    // p0..p3 / q0..q3 address the four corners of the sliding window inside
    // the plain / squared integral images, respectively.
    double *q0 = 0, *q1 = 0, *q2 = 0, *q3 = 0;
    double templNorm = 0, templSum2 = 0;

    if( method == CV_TM_CCOEFF )
    {
        // Unnormalized CCOEFF only needs window sums, not window energies.
        integral(img, sum, CV_64F);
        templMean = mean(templ);
    }
    else
    {
        integral(img, sum, sqsum, CV_64F);
        meanStdDev( templ, templMean, templSdv );

        templNorm = CV_SQR(templSdv[0]) + CV_SQR(templSdv[1]) +
                    CV_SQR(templSdv[2]) + CV_SQR(templSdv[3]);

        // A constant template has zero variance: define CCOEFF_NORMED as 1.
        if( templNorm < DBL_EPSILON && method == CV_TM_CCOEFF_NORMED )
        {
            result = Scalar::all(1);
            return;
        }

        templSum2 = templNorm +
                     CV_SQR(templMean[0]) + CV_SQR(templMean[1]) +
                     CV_SQR(templMean[2]) + CV_SQR(templMean[3]);

        if( numType != 1 )
        {
            // CCORR/SQDIFF use raw second moments, not mean-centered ones.
            templMean = Scalar::all(0);
            templNorm = templSum2;
        }

        // Convert per-pixel moments back to per-window totals.
        templSum2 /= invArea;
        templNorm = sqrt(templNorm);
        templNorm /= sqrt(invArea); // care of accuracy here

        q0 = (double*)sqsum.data;
        q1 = q0 + templ.cols*cn;
        q2 = (double*)(sqsum.data + templ.rows*sqsum.step);
        q3 = q2 + templ.cols*cn;
    }

    double* p0 = (double*)sum.data;
    double* p1 = p0 + templ.cols*cn;
    double* p2 = (double*)(sum.data + templ.rows*sum.step);
    double* p3 = p2 + templ.cols*cn;

    // Row strides of the integral images, in doubles (0 when not computed).
    int sumstep = sum.data ? (int)(sum.step / sizeof(double)) : 0;
    int sqstep = sqsum.data ? (int)(sqsum.step / sizeof(double)) : 0;

    int i, j, k;

    for( i = 0; i < result.rows; i++ )
    {
        float* rrow = (float*)(result.data + i*result.step);
        int idx = i * sumstep;
        int idx2 = i * sqstep;

        for( j = 0; j < result.cols; j++, idx += cn, idx2 += cn )
        {
            // num starts as the raw cross-correlation for this window.
            double num = rrow[j], t;
            double wndMean2 = 0, wndSum2 = 0;

            if( numType == 1 )
            {
                // CCOEFF: subtract (window sum) * (template mean) per channel.
                for( k = 0; k < cn; k++ )
                {
                    t = p0[idx+k] - p1[idx+k] - p2[idx+k] + p3[idx+k];
                    wndMean2 += CV_SQR(t);
                    num -= t*templMean[k];
                }

                wndMean2 *= invArea;
            }

            if( isNormed || numType == 2 )
            {
                // Window energy from the squared integral image.
                for( k = 0; k < cn; k++ )
                {
                    t = q0[idx2+k] - q1[idx2+k] - q2[idx2+k] + q3[idx2+k];
                    wndSum2 += t;
                }

                if( numType == 2 )
                    num = wndSum2 - 2*num + templSum2; // ||I||^2 - 2*I.T + ||T||^2
            }

            if( isNormed )
            {
                t = sqrt(MAX(wndSum2 - wndMean2,0))*templNorm;
                if( fabs(num) < t )
                    num /= t;
                else if( fabs(num) < t*1.125 )
                    num = num > 0 ? 1 : -1; // clamp slight numeric overshoot
                else
                    num = method != CV_TM_SQDIFF_NORMED ? 0 : 1; // degenerate window
            }

            rrow[j] = (float)num;
        }
    }
}
Example #14
0
// Element-wise division of one Fourier spectrum by another:
// dst = A / B when conjB is false, dst = A / conj(B) when conjB is true
// (division is done as A*conj(B)/|B|^2, resp. A*B/|B|^2).  A small eps is
// added to |B|^2 to avoid division by zero.  Single-channel inputs
// (CV_32FC1/CV_64FC1) are assumed to be in the packed CCS layout produced
// by dft() for real input, so the purely-real first/last columns and rows
// get dedicated handling.
static void divSpectrums( InputArray _srcA, InputArray _srcB, OutputArray _dst, int flags, bool conjB)
{
    Mat srcA = _srcA.getMat(), srcB = _srcB.getMat();
    int depth = srcA.depth(), cn = srcA.channels(), type = srcA.type();
    int rows = srcA.rows, cols = srcA.cols;
    int j, k;

    CV_Assert( type == srcB.type() && srcA.size() == srcB.size() );
    CV_Assert( type == CV_32FC1 || type == CV_32FC2 || type == CV_64FC1 || type == CV_64FC2 );

    _dst.create( srcA.rows, srcA.cols, type );
    Mat dst = _dst.getMat();

    // A single row, or a continuous single column, is treated as a 1-D spectrum.
    bool is_1d = (flags & DFT_ROWS) || (rows == 1 || (cols == 1 &&
             srcA.isContinuous() && srcB.isContinuous() && dst.isContinuous()));

    if( is_1d && !(flags & DFT_ROWS) )
        cols = cols + rows - 1, rows = 1;

    int ncols = cols*cn;
    // In CCS data the first (and, for even widths, last) element of a row is
    // purely real; j0/j1 bound the interleaved (re, im) pairs in between.
    int j0 = cn == 1;
    int j1 = ncols - (cols % 2 == 0 && cn == 1);

    if( depth == CV_32F )
    {
        const float* dataA = srcA.ptr<float>();
        const float* dataB = srcB.ptr<float>();
        float* dataC = dst.ptr<float>();
        float eps = FLT_EPSILON; // prevent div0 problems

        size_t stepA = srcA.step/sizeof(dataA[0]);
        size_t stepB = srcB.step/sizeof(dataB[0]);
        size_t stepC = dst.step/sizeof(dataC[0]);

        // 2-D CCS layout: the first column (and the last one when the width
        // is even) stores a real 1-D spectrum running down the column.
        if( !is_1d && cn == 1 )
        {
            for( k = 0; k < (cols % 2 ? 1 : 2); k++ )
            {
                if( k == 1 )
                    dataA += cols - 1, dataB += cols - 1, dataC += cols - 1;
                // Purely-real top (and, for even heights, bottom) elements.
                dataC[0] = dataA[0] / (dataB[0] + eps);
                if( rows % 2 == 0 )
                    dataC[(rows-1)*stepC] = dataA[(rows-1)*stepA] / (dataB[(rows-1)*stepB] + eps);
                if( !conjB )
                    // (re, im) pairs in consecutive rows: compute A/B.
                    for( j = 1; j <= rows - 2; j += 2 )
                    {
                        double denom = (double)dataB[j*stepB]*dataB[j*stepB] +
                                       (double)dataB[(j+1)*stepB]*dataB[(j+1)*stepB] + (double)eps;

                        double re = (double)dataA[j*stepA]*dataB[j*stepB] +
                                    (double)dataA[(j+1)*stepA]*dataB[(j+1)*stepB];

                        double im = (double)dataA[(j+1)*stepA]*dataB[j*stepB] -
                                    (double)dataA[j*stepA]*dataB[(j+1)*stepB];

                        dataC[j*stepC] = (float)(re / denom);
                        dataC[(j+1)*stepC] = (float)(im / denom);
                    }
                else
                    // Same walk, but dividing by conj(B).
                    for( j = 1; j <= rows - 2; j += 2 )
                    {

                        double denom = (double)dataB[j*stepB]*dataB[j*stepB] +
                                       (double)dataB[(j+1)*stepB]*dataB[(j+1)*stepB] + (double)eps;

                        double re = (double)dataA[j*stepA]*dataB[j*stepB] -
                                    (double)dataA[(j+1)*stepA]*dataB[(j+1)*stepB];

                        double im = (double)dataA[(j+1)*stepA]*dataB[j*stepB] +
                                    (double)dataA[j*stepA]*dataB[(j+1)*stepB];

                        dataC[j*stepC] = (float)(re / denom);
                        dataC[(j+1)*stepC] = (float)(im / denom);
                    }
                if( k == 1 )
                    dataA -= cols - 1, dataB -= cols - 1, dataC -= cols - 1;
            }
        }

        // Remaining elements: interleaved (re, im) pairs between j0 and j1.
        for( ; rows--; dataA += stepA, dataB += stepB, dataC += stepC )
        {
            if( is_1d && cn == 1 )
            {
                // Real-valued first (and, for even length, last) elements.
                dataC[0] = dataA[0] / (dataB[0] + eps);
                if( cols % 2 == 0 )
                    dataC[j1] = dataA[j1] / (dataB[j1] + eps);
            }

            if( !conjB )
                for( j = j0; j < j1; j += 2 )
                {
                    double denom = (double)(dataB[j]*dataB[j] + dataB[j+1]*dataB[j+1] + eps);
                    double re = (double)(dataA[j]*dataB[j] + dataA[j+1]*dataB[j+1]);
                    double im = (double)(dataA[j+1]*dataB[j] - dataA[j]*dataB[j+1]);
                    dataC[j] = (float)(re / denom);
                    dataC[j+1] = (float)(im / denom);
                }
            else
                for( j = j0; j < j1; j += 2 )
                {
                    double denom = (double)(dataB[j]*dataB[j] + dataB[j+1]*dataB[j+1] + eps);
                    double re = (double)(dataA[j]*dataB[j] - dataA[j+1]*dataB[j+1]);
                    double im = (double)(dataA[j+1]*dataB[j] + dataA[j]*dataB[j+1]);
                    dataC[j] = (float)(re / denom);
                    dataC[j+1] = (float)(im / denom);
                }
        }
    }
    else
    {
        // CV_64F branch: identical logic in double precision.
        const double* dataA = srcA.ptr<double>();
        const double* dataB = srcB.ptr<double>();
        double* dataC = dst.ptr<double>();
        double eps = DBL_EPSILON; // prevent div0 problems

        size_t stepA = srcA.step/sizeof(dataA[0]);
        size_t stepB = srcB.step/sizeof(dataB[0]);
        size_t stepC = dst.step/sizeof(dataC[0]);

        // Packed first/last CCS columns, as in the float branch above.
        if( !is_1d && cn == 1 )
        {
            for( k = 0; k < (cols % 2 ? 1 : 2); k++ )
            {
                if( k == 1 )
                    dataA += cols - 1, dataB += cols - 1, dataC += cols - 1;
                dataC[0] = dataA[0] / (dataB[0] + eps);
                if( rows % 2 == 0 )
                    dataC[(rows-1)*stepC] = dataA[(rows-1)*stepA] / (dataB[(rows-1)*stepB] + eps);
                if( !conjB )
                    for( j = 1; j <= rows - 2; j += 2 )
                    {
                        double denom = dataB[j*stepB]*dataB[j*stepB] +
                                       dataB[(j+1)*stepB]*dataB[(j+1)*stepB] + eps;

                        double re = dataA[j*stepA]*dataB[j*stepB] +
                                    dataA[(j+1)*stepA]*dataB[(j+1)*stepB];

                        double im = dataA[(j+1)*stepA]*dataB[j*stepB] -
                                    dataA[j*stepA]*dataB[(j+1)*stepB];

                        dataC[j*stepC] = re / denom;
                        dataC[(j+1)*stepC] = im / denom;
                    }
                else
                    for( j = 1; j <= rows - 2; j += 2 )
                    {
                        double denom = dataB[j*stepB]*dataB[j*stepB] +
                                       dataB[(j+1)*stepB]*dataB[(j+1)*stepB] + eps;

                        double re = dataA[j*stepA]*dataB[j*stepB] -
                                    dataA[(j+1)*stepA]*dataB[(j+1)*stepB];

                        double im = dataA[(j+1)*stepA]*dataB[j*stepB] +
                                    dataA[j*stepA]*dataB[(j+1)*stepB];

                        dataC[j*stepC] = re / denom;
                        dataC[(j+1)*stepC] = im / denom;
                    }
                if( k == 1 )
                    dataA -= cols - 1, dataB -= cols - 1, dataC -= cols - 1;
            }
        }

        for( ; rows--; dataA += stepA, dataB += stepB, dataC += stepC )
        {
            if( is_1d && cn == 1 )
            {
                dataC[0] = dataA[0] / (dataB[0] + eps);
                if( cols % 2 == 0 )
                    dataC[j1] = dataA[j1] / (dataB[j1] + eps);
            }

            if( !conjB )
                for( j = j0; j < j1; j += 2 )
                {
                    double denom = dataB[j]*dataB[j] + dataB[j+1]*dataB[j+1] + eps;
                    double re = dataA[j]*dataB[j] + dataA[j+1]*dataB[j+1];
                    double im = dataA[j+1]*dataB[j] - dataA[j]*dataB[j+1];
                    dataC[j] = re / denom;
                    dataC[j+1] = im / denom;
                }
            else
                for( j = j0; j < j1; j += 2 )
                {
                    double denom = dataB[j]*dataB[j] + dataB[j+1]*dataB[j+1] + eps;
                    double re = dataA[j]*dataB[j] - dataA[j+1]*dataB[j+1];
                    double im = dataA[j+1]*dataB[j] + dataA[j]*dataB[j+1];
                    dataC[j] = re / denom;
                    dataC[j+1] = im / denom;
                }
        }
    }

}
Example #15
0
int main( int argc, char** argv )
{
    Mat img;

    /// Load image
    if( argc != 2 || !(img=imread(argv[1], 1)).data || img.channels()!=3 ) return -1;

    /// Resize (downsize) the image
    double scale = SIZE / ((img.rows>img.cols)?img.rows:img.cols);
    Mat small;
    resize(img, small, small.size(), scale, scale, INTER_AREA);
#ifdef DEBUG
    printf("Source image resized to %d x %d\n", small.cols, small.rows);
#endif

    /// Separate the image in 3 places ( B, G and R )
    vector<Mat> rgbPlanes;
    split( small, rgbPlanes );

    /// Finding the petri-film circle
    // TODO keep time
    std::vector<cv::Point> circle;
    findCircle(rgbPlanes[1], circle);
    if(circle.size() <= 0) {
        printf("Could not find the petri-film circle!\nPlease make sure that 20 percent of center of the image is entirely on the perti-film.\n");
        exit(1);
    }
#ifdef DEBUG
    printf("Petri-film contour: %d points\n", circle.size());
    Mat debugImg1 = small.clone();
    std::vector< std::vector<cv::Point> > debugContours;
    debugContours.push_back(circle);
    drawContours(debugImg1, debugContours, 0, Scalar(0,255,0), 3, 8);
    namedWindow("Found petri-film circle", CV_WINDOW_KEEPRATIO);
    imshow("Found petri-film circle", debugImg1 );
    //waitKey(0);
#endif


    /// Get the petri-film region and circle mask
    Mat petri;
    Mat mask;
    getMask(small, circle, petri, mask);
    vector<Mat> maskPlanes;
    split( mask, maskPlanes );

    /// Separate the image in 3 places ( B, G and R )
    vector<Mat> petriPlanes;
    split( petri, petriPlanes );

#ifdef DEBUG
    /// Histo-Quad
    printf("\nHisto-Quad\n");
#endif

    // TODO Check Background function (instead of mean)
    /// Calculate 4 background colors
    Rect tmpRect;
    Mat tmpROI;
    Mat tmpMask;
    //cv::Scalar tmpMean;
    Scalar tmpBG[4];

    /// Top region
    tmpRect.x = 0;
    tmpRect.y = 0;
    tmpRect.width = petri.cols;
    tmpRect.height = petri.rows / 2;
    tmpROI = Mat(petri, tmpRect);
    tmpMask = Mat(maskPlanes[0], tmpRect);
    tmpBG[0] = mean( tmpROI, tmpMask );

    /// Buttom region
    //tmpRect.x = 0;
    tmpRect.y = petri.rows / 2;
    //tmpRect.width = petri.cols;
    //tmpRect.height = petri.rows / 2;
    tmpROI = Mat(petri, tmpRect);
    tmpMask = Mat(maskPlanes[0], tmpRect);
    tmpBG[1] = mean( tmpROI, tmpMask );

    /// Left region
    //tmpRect.x = 0;
    tmpRect.y = 0;
    tmpRect.width = petri.cols / 2;
    tmpRect.height = petri.rows;
    tmpROI = Mat(petri, tmpRect);
    tmpMask = Mat(maskPlanes[0], tmpRect);
    tmpBG[2] = mean( tmpROI, tmpMask );

    /// Right region
    tmpRect.x = petri.cols / 2;
    //tmpRect.y = 0;
    //tmpRect.width = petri.cols / 2;
    //tmpRect.height = petri.rows;
    tmpROI = Mat(petri, tmpRect);
    tmpMask = Mat(maskPlanes[0], tmpRect);
    tmpBG[3] = mean( tmpROI, tmpMask );

#ifdef DEBUG
    printf("Mean colors of regions (4 sides) of the petri film ( B , G , R):\nBefore Histo-Quad:\n");
    printf("Top: %.1f, %.1f, %.1f\n",    tmpBG[0][0], tmpBG[0][1], tmpBG[0][2]);
    printf("Buttom: %.1f, %.1f, %.1f\n", tmpBG[1][0], tmpBG[1][1], tmpBG[1][2]);
    printf("Left: %.1f, %.1f, %.1f\n",   tmpBG[2][0], tmpBG[2][1], tmpBG[2][2]);
    printf("Right: %.1f, %.1f, %.1f\n",  tmpBG[3][0], tmpBG[3][1], tmpBG[3][2]);

    /*
    Mat debugImg3_b4 = petriPlanes[0].clone();
    namedWindow("B befor Histo-Quad", CV_WINDOW_KEEPRATIO);
    imshow("B befor Histo-Quad", debugImg3_b4 );
    */
#endif

    double m[2][3];
    double l = sqrt(petri.cols*petri.cols + petri.rows*petri.rows)*PI/8;
    double colorConst[2][3];
    // TODO use the max instead of avgBG ?
    double avgBG[3];
    avgBG[0] = (tmpBG[0][0] + tmpBG[1][0] + tmpBG[2][0] + tmpBG[3][0] ) /4;
    avgBG[1] = (tmpBG[0][1] + tmpBG[1][1] + tmpBG[2][1] + tmpBG[3][1] ) /4;
    avgBG[2] = (tmpBG[0][2] + tmpBG[1][2] + tmpBG[2][2] + tmpBG[3][2] ) /4;
    /// Top Left
    m[0][0] = ( tmpBG[0][0] - tmpBG[2][0] ) / l;
    colorConst[0][0] = tmpBG[2][0];
    m[0][1] = ( tmpBG[0][1] - tmpBG[2][1] ) / l;
    colorConst[0][1] = tmpBG[2][1];
    m[0][2] = ( tmpBG[0][2] - tmpBG[2][2] ) / l;
    colorConst[0][2] = tmpBG[2][2];
    /// Buttom Right
    m[1][0] = ( tmpBG[3][0] - tmpBG[1][0] ) / l;
    colorConst[1][0] = tmpBG[1][0];
    m[1][1] = ( tmpBG[3][1] - tmpBG[1][1] ) / l;
    colorConst[1][1] = tmpBG[1][1];
    m[1][2] = ( tmpBG[3][2] - tmpBG[1][2] ) / l;
    colorConst[1][2] = tmpBG[1][2];
    double row0 = petri.rows * (0.5 - PI/8);
    double col0 = petri.cols * (0.5 - PI/8);

    double intColors[2][3];
    for( int row=0; row<petri.rows; row++) {
        uchar* rowStartB = petriPlanes[0].ptr<uchar>(row);
        uchar* rowStartG = petriPlanes[1].ptr<uchar>(row);
        uchar* rowStartR = petriPlanes[2].ptr<uchar>(row);
        double y = row - row0;
        y /= sqrt(2);
        for( int col=0; col<petri.cols; col += 1) {
            double x = col - col0;
            x /= sqrt(2);
            double x2 = x - y + l/2;
            double y2 = x + y - l/2;
            /// Top Left
            intColors[0][0] = m[0][0] * x2 + colorConst[0][0];
            intColors[0][1] = m[0][1] * x2 + colorConst[0][1];
            intColors[0][2] = m[0][2] * x2 + colorConst[0][2];
            /// Buttom Right
            intColors[1][0] = m[1][0] * x2 + colorConst[1][0];
            intColors[1][1] = m[1][1] * x2 + colorConst[1][1];
            intColors[1][2] = m[1][2] * x2 + colorConst[1][2];
            //*(rowStart + col) = intColors[0];
            *(rowStartB + col) += avgBG[0] - ( (intColors[1][0] - intColors[0][0]) / l * y2 + intColors[0][0] );
            *(rowStartG + col) += avgBG[1] - ( (intColors[1][1] - intColors[0][1]) / l * y2 + intColors[0][1] );
            *(rowStartR + col) += avgBG[2] - ( (intColors[1][2] - intColors[0][2]) / l * y2 + intColors[0][2] );
        }
    }

    // FIXME I think we do not need "petri" Mat from here, but anyway......
    merge(petriPlanes, petri);

#ifdef DEBUG
    /// Re-Calculate 4 background colors
    /// Top region
    tmpRect.x = 0;
    tmpRect.y = 0;
    tmpRect.width = petri.cols;
    tmpRect.height = petri.rows / 2;
    tmpROI = Mat(petri, tmpRect);
    tmpMask = Mat(maskPlanes[0], tmpRect);
    tmpBG[0] = mean( tmpROI, tmpMask );

    /// Buttom region
    //tmpRect.x = 0;
    tmpRect.y = petri.rows / 2;
    //tmpRect.width = petri.cols;
    //tmpRect.height = petri.rows / 2;
    tmpROI = Mat(petri, tmpRect);
    tmpMask = Mat(maskPlanes[0], tmpRect);
    tmpBG[1] = mean( tmpROI, tmpMask );

    /// Left region
    //tmpRect.x = 0;
    tmpRect.y = 0;
    tmpRect.width = petri.cols / 2;
    tmpRect.height = petri.rows;
    tmpROI = Mat(petri, tmpRect);
    tmpMask = Mat(maskPlanes[0], tmpRect);
    tmpBG[2] = mean( tmpROI, tmpMask );

    /// Right region
    tmpRect.x = petri.cols / 2;
    //tmpRect.y = 0;
    //tmpRect.width = petri.cols / 2;
    //tmpRect.height = petri.rows;
    tmpROI = Mat(petri, tmpRect);
    tmpMask = Mat(maskPlanes[0], tmpRect);
    tmpBG[3] = mean( tmpROI, tmpMask );

    printf("After Histo-Quad:\n");
    printf("Top: %.1f, %.1f, %.1f\n",    tmpBG[0][0], tmpBG[0][1], tmpBG[0][2]);
    printf("Buttom: %.1f, %.1f, %.1f\n", tmpBG[1][0], tmpBG[1][1], tmpBG[1][2]);
    printf("Left: %.1f, %.1f, %.1f\n",   tmpBG[2][0], tmpBG[2][1], tmpBG[2][2]);
    printf("Right: %.1f, %.1f, %.1f\n",  tmpBG[3][0], tmpBG[3][1], tmpBG[3][2]);
    printf("Histo-Quad ends here\n\n");
    /*
    Mat debugImg3_after = petriPlanes[0].clone();
    namedWindow("B after Histo-Quad", CV_WINDOW_KEEPRATIO);
    imshow("B after Histo-Quad", debugImg3_after );
    */
#endif
    /// Histo-Quad ends here

#ifdef DEBUG
    //namedWindow("Mask", CV_WINDOW_KEEPRATIO);
    //imshow("Mask", mask );
    Mat debugImg2 = petri.clone();
    namedWindow("RIO", CV_WINDOW_KEEPRATIO);
    imshow("RIO", debugImg2 );

    Mat debugImg3_0 = petriPlanes[0].clone();
    namedWindow("B", CV_WINDOW_KEEPRATIO);
    imshow("B", debugImg3_0 );

    Mat debugImg3_1 = petriPlanes[1].clone();
    namedWindow("G", CV_WINDOW_KEEPRATIO);
    imshow("G", debugImg3_1 );

    Mat debugImg3_2 = petriPlanes[2].clone();
    namedWindow("R", CV_WINDOW_KEEPRATIO);
    imshow("R", debugImg3_2 );
    //waitKey(0);
#endif


    /// Compute histograms
    vector<Mat> histogram;
    Scalar BGcolor = computeHistograms(petriPlanes, maskPlanes[0], histogram);

#ifdef DEBUG
    printf("Background color (peaks in histogram) RGB: %.0f,%.0f,%.0f\n",
           BGcolor[2] *255/HIST_BIN_COUNT, BGcolor[1] *255/HIST_BIN_COUNT, BGcolor[0] *255/HIST_BIN_COUNT);
#endif

    /// Find a point on yellow lines
    float yellowB, yellowG = 0; //Initialize as the first color in the histogram, but it should not be as dark as the first color and will check it later
    for( int i = 2; i < BGcolor[0]; i++ ) { //Is not in the same HIST_BIN as BG; it is even darker
        if(  histogram[0].at<float>(i) > histogram[0].at<float>(yellowB)
                && histogram[0].at<float>(i-1) < histogram[0].at<float>(i)
                && histogram[0].at<float>(i) > histogram[0].at<float>(i+1) ) {
            yellowB = i;
#ifdef DEBUG
            printf("Local max B in histogram: %.1f (at bin %d)\n", i*255./HIST_BIN_COUNT, i);
#endif
        }
    }

#ifdef DEBUG
    printf("\tYellow line B bin in histogram: %d\n", yellowB);
#endif

    if( yellowB < 2 || yellowB > HIST_BIN_COUNT-2 ) {//it should not be neither as dark as the first color nor as bright as the last
        printf("Could not find yellow lines!\nPlease make sure that the picture is neither over nor under exposed.\n");
        exit(2);
    }
    yellowB *= 255./HIST_BIN_COUNT;
#ifdef DEBUG
            printf("\tYellow line B: %.1f\n", yellowB);
#endif


    {
        // Color difference between B at a point and the B at yellow lines
        float colorDist = fabs( yellowB - petriPlanes[0].at<uchar>(petri.cols/2, petri.rows/2)); //Initialize at center
        float tmpBdist;
        /// Searching for yellow lines to find G at a point on lines
        //Going to search from 3 pixels from center to 80% of radious on the center line in 2 directions
        //TODO it can be done in 2 separete threads
        uchar* centerPixelB = petriPlanes[0].ptr<uchar>(petri.rows/2) + (petri.cols/2);
        uchar* centerPixelG = petriPlanes[1].ptr<uchar>(petri.rows/2) + (petri.cols/2);
        for(int i=3; i< (petri.cols+petri.rows)/5; i++) {
            tmpBdist = fabs( yellowB - *(centerPixelB + i) );
            if(tmpBdist < colorDist) {
                colorDist = tmpBdist;
                yellowG = *(centerPixelG + i);
            }

            tmpBdist = fabs( yellowB - *(centerPixelB - i) );
            if(tmpBdist < colorDist) {
                colorDist = tmpBdist;
                yellowG = *(centerPixelG - i);
            }
        }
    }
    //FIXME add some assertions
#ifdef DEBUG
    printf("Yellow lines: G=%.1f, B=%.1f\n", yellowG, yellowB);
#endif


    cv::Scalar tmpMean = mean( petri, maskPlanes[0] );
#ifdef DEBUG
    printf("Mean color RGB: %.2f,%.2f,%.2f\n", tmpMean[2], tmpMean[1], tmpMean[0]);
#endif

    // Merging G and B planes to remove yellow lines
    double tmpDeltaG = yellowG - tmpMean[1];
    double lambda = tmpDeltaG / (yellowB - tmpMean[0] - tmpDeltaG);
#ifdef DEBUG
    //tmpDeltaG = yellowG - BGcolor[1];
    //printf("By histogram: %.3f\n", tmpDeltaG / (yellowB - BGcolor[0] - tmpDeltaG));
    //tmpDeltaG = yellowG - tmpMean[1];
    //printf("By Mean: %.3f\n", tmpDeltaG / (yellowG - tmpMean[0] - tmpDeltaG));

    printf("Lambda: %.3f\n", lambda);
#endif

    Mat merge_G, merge_B, merged;
    petriPlanes[0].convertTo(merge_B, CV_64FC1);
    petriPlanes[1].convertTo(merge_G, CV_64FC1);

    if (0.01<lambda && lambda<=0.5) {
        merged = merge_G * (1+lambda) - merge_B * lambda;
#ifdef DEBUG
        Mat debugImg4 ; //= merged.clone();
        namedWindow("Merged", CV_WINDOW_KEEPRATIO);
        merged.convertTo(debugImg4, CV_8UC1);
        imshow("Merged", debugImg4 );
#endif
    } else {
        printf("lambda: %.2f => No yellow line removal\n", lambda);
    }

    /// Create low-pass filter, only within mask
    Mat blurred;
    Mat blurredCount;
    int blursize = (merged.rows / 12) * 2 + 1;
    Mat mergedMask;
    maskPlanes[0].convertTo(mergedMask, CV_64FC1);

    boxFilter(merged & mergedMask, blurred, CV_64FC1, Size(blursize, blursize),
              Point(-1, -1), false, BORDER_CONSTANT);
    boxFilter(mergedMask, blurredCount, CV_64FC1, Size(blursize, blursize),
              Point(-1, -1), false, BORDER_CONSTANT);
    blurred /= blurredCount / 255;

    /// High-pass image
    Mat highpass;
    merged.convertTo(highpass, CV_64FC1);
    highpass = highpass / blurred * 255;
    // Mask outside of circle to background (which has become Scalar(255,255,255))
    highpass.setTo(Scalar(255,255,255), 255 - maskPlanes[0]);


#ifdef DEBUG
    Mat debugImg5;
    highpass.convertTo(debugImg5, CV_8UC1);
    namedWindow("Low-pass / high-pass filtered petri", CV_WINDOW_KEEPRATIO);
    imshow("Low-pass / high-pass filtered petri", debugImg5 );
#endif

    // Convert back to 8-bit
    //Mat highpass8;
    //highpass.convertTo(highpass8, CV_8UC3);





    waitKey(0);

    return 0;
}
Example #16
0
// Starts the motion-detection capture loop: connects to the remote peer,
// opens camera `i`, then streams frames forever while drawing rectangles
// around moving regions and adaptively rebuilding the background frame.
// `pas` is stored as the snapshot save path (used by the commented-out
// 's'-key snapshot code below).
// NOTE(review): a default argument on a member-function *definition* is only
// legal if the declaration does not also supply one — confirm against the header.
void Dtmove::start(int i ,String pas= "******")
{
    int sorr=0x05,ack;
    clientinit();
    // Handshake: send the 0x05 start marker. Assumes sizeof(int)==4 on
    // both ends — TODO confirm, or use sizeof(sorr).
    send(sockfd,&sorr,4,0);
    path =pas;
    cap = VideoCapture(i);
    cap >> frame;
    datasize = frame.rows*frame.cols*frame.channels();
    occ = 0;                                  // "motion occurred" flag for the current frame
    color = Scalar( 0, 255, 0);               // green detection rectangles
    element = getStructuringElement( 0,Size( 3, 3 ), Point(1, 1 ) );  // 3x3 rect kernel for dilation
    ckcamera();
    // Initialize the background frame
    cap >> frame;
    cvtColor(frame, avg, COLOR_BGR2GRAY);
    GaussianBlur(avg, avg, Size(7,7), 1.5, 1.5);
    gray = avg.clone();
    frameold = gray.clone();
    
    while(1)    // main loop
    {
        recvall(sockfd,&ack,4);
        if(ack != 0x77) perror("ack");
        cap >> frame;                                        // grab one frame
        cvtColor(frame, gray, COLOR_BGR2GRAY);               // convert to grayscale
        GaussianBlur(gray, gray, Size(7,7), 1.5, 1.5);       // Gaussian blur to suppress noise
        absdiff(gray,avg,differ);                            // absolute difference vs. background, stored in differ
        threshold(differ,thresh, 40, 255, THRESH_BINARY);    // binarize with the given threshold
        dilate(thresh,bigger, element,Point(-1,-1), 1);      // dilate the binary image
        findContours(bigger,contours, RETR_EXTERNAL,CHAIN_APPROX_SIMPLE);  // find motion contours
        for(unsigned int j = 0; j < contours.size(); j++ )
        {
            if (contourArea(contours[j])<1000) continue;     // ignore small blobs (noise)
            occ = 1;
            ret = boundingRect(contours[j]);
            rectangle(frame,ret,color,2);
        }
        // Rebuild the background
        if (occ==0)
        {
            times = 0;
            mean = cv::mean(differ); // qualified with cv:: to avoid a name clash with the local `mean`
            //cout<<"m"<<mean[0]<<endl;
            if (mean[0] > 2)
            {
                avg = gray.clone();
            }
        }else
        {
            if (times > 30)
            {
                /* rebuild the background */
                times = 0;
                absdiff(gray,frameold,frameold); 
                mean = cv::mean(frameold);   // qualified with cv:: to avoid a name clash
                //cout<<"f"<<mean[0]<<endl;
                // Scene is static again (old vs. new frame barely differ): adopt it.
                if  ( mean[0] < 2 ) 
                {
                    avg = gray.clone();
                }
                frameold = gray.clone();
            }
            times++;
            occ = 0;
        }
        // Ship the (annotated) frame to the peer
        sender();
        //imshow("frame", frame);
        //imshow("avg", avg);
        //imshow("thresh", thresh);
        //imshow("differ", differ);

        //key = waitKey(25)&0xFF;
        //if (key == 's')
        //{
            //now_time = time(NULL);
            //p=localtime(&now_time);
            //strftime(fmt_time, sizeof(fmt_time), "%Y_%m_%d_%H_%M_%S", p);
            //svtime = path + format("%s.jpg",fmt_time);
            //cout << svtime << endl;
            //imwrite(svtime,frame);
        //}
        //if(key == 'q') break;
        //timeuse=1000000*(tpend.tv_sec-tpstart.tv_sec)+ 
        //tpend.tv_usec-tpstart.tv_usec; 
        //timeuse/=1000000; 
        //cout<<timeuse<<endl;
    }

    //destroyWindow("frame");
    cap.release();
    close(sockfd);

}
void App::handleKey(char key)
{
    switch (key)
    {
    case 27:
        running = false;
        break;
    case 'p': case 'P':
        printParams();
        break;  
    case 'g': case 'G':
        if (left.channels() == 1 && p.method != Params::BM)
        {
            left = left_src;
            right = right_src;
        }
        else 
        {
            cvtColor(left_src, left, CV_BGR2GRAY);
            cvtColor(right_src, right, CV_BGR2GRAY);
        }
        d_left = left;
        d_right = right;
        cout << "image_channels: " << left.channels() << endl;
        imshow("left", left);
        imshow("right", right);
        break;
    case 'm': case 'M':
        switch (p.method)
        {
        case Params::BM:
            p.method = Params::BP;
            break;
        case Params::BP:
            p.method = Params::CSBP;
            break;
        case Params::CSBP:
            p.method = Params::BM;
            break;
        }
        cout << "method: " << p.method_str() << endl;
        break;
    case 's': case 'S':
        if (p.method == Params::BM)
        {
            switch (bm.preset)
            {
            case gpu::StereoBM_GPU::BASIC_PRESET:
                bm.preset = gpu::StereoBM_GPU::PREFILTER_XSOBEL;
                break;
            case gpu::StereoBM_GPU::PREFILTER_XSOBEL:
                bm.preset = gpu::StereoBM_GPU::BASIC_PRESET;
                break;
            }
            cout << "prefilter_sobel: " << bm.preset << endl;
        }
        break;
    case '1':
        p.ndisp = p.ndisp == 1 ? 8 : p.ndisp + 8;
        cout << "ndisp: " << p.ndisp << endl;
        bm.ndisp = p.ndisp;
        bp.ndisp = p.ndisp;
        csbp.ndisp = p.ndisp;
        break;
    case 'q': case 'Q':
        p.ndisp = max(p.ndisp - 8, 1);
        cout << "ndisp: " << p.ndisp << endl;
        bm.ndisp = p.ndisp;
        bp.ndisp = p.ndisp;
        csbp.ndisp = p.ndisp;
        break;
    case '2':
        if (p.method == Params::BM)
        {
            bm.winSize = min(bm.winSize + 1, 51);
            cout << "win_size: " << bm.winSize << endl;
        }
        break;
    case 'w': case 'W':
        if (p.method == Params::BM)
        {
            bm.winSize = max(bm.winSize - 1, 2);
            cout << "win_size: " << bm.winSize << endl;
        }
        break;
    case '3':
        if (p.method == Params::BP)
        {
            bp.iters += 1;
            cout << "iter_count: " << bp.iters << endl;
        }
        else if (p.method == Params::CSBP)
        {
            csbp.iters += 1;
            cout << "iter_count: " << csbp.iters << endl;
        }
        break;
    case 'e': case 'E':
        if (p.method == Params::BP)
        {
            bp.iters = max(bp.iters - 1, 1);
            cout << "iter_count: " << bp.iters << endl;
        }
        else if (p.method == Params::CSBP)
        {
            csbp.iters = max(csbp.iters - 1, 1);
            cout << "iter_count: " << csbp.iters << endl;
        }
        break;
    case '4':
        if (p.method == Params::BP)
        {
            bp.levels += 1;
            cout << "level_count: " << bp.levels << endl;
        }
        else if (p.method == Params::CSBP)
        {
            csbp.levels += 1;
            cout << "level_count: " << csbp.levels << endl;
        }
        break;
    case 'r': case 'R':
        if (p.method == Params::BP)
        {
            bp.levels = max(bp.levels - 1, 1);
            cout << "level_count: " << bp.levels << endl;
        }
        else if (p.method == Params::CSBP)
        {
            csbp.levels = max(csbp.levels - 1, 1);
            cout << "level_count: " << csbp.levels << endl;
        }
        break;
    }
}
Example #18
0
// IPP fast path for the Scharr derivative of `src` into `dst` (dx,dy each 0
// or 1). Returns true when IPP handled the request; false means the caller
// must fall back to the generic filter implementation.
// Supported combinations (from the switches below): single-channel
// 8u -> 16s (only when scale == 1) and 32f -> 32f (scale applied afterwards
// with ippiMulC). All other type / (dx,dy) / scale combinations return false.
static bool IPPDerivScharr(const Mat& src, Mat& dst, int ddepth, int dx, int dy, double scale)
{
   int bufSize = 0;
   cv::AutoBuffer<char> buffer;   // scratch buffer required by the IPP border filters
   IppiSize roi = ippiSize(src.cols, src.rows);

   if( ddepth < 0 )
     ddepth = src.depth();

   dst.create( src.size(), CV_MAKETYPE(ddepth, src.channels()) );

   switch(src.type())
   {
      case CV_8U:   // == CV_8UC1: single-channel 8-bit only
         {
            // The 8u->16s IPP Scharr has no scaling variant.
            if(scale != 1)
                return false;

            switch(dst.type())
            {
               case CV_16S:
               {
                  if((dx == 1) && (dy == 0))
                  {
                     ippiFilterScharrVertGetBufferSize_8u16s_C1R(roi,&bufSize);
                     buffer.allocate(bufSize);

                     ippiFilterScharrVertBorder_8u16s_C1R((const Ipp8u*)src.data, (int)src.step,
                        (Ipp16s*)dst.data, (int)dst.step, roi, ippBorderRepl, 0, (Ipp8u*)(char*)buffer);

                     return true;
                  }

                  if((dx == 0) && (dy == 1))
                  {
                     ippiFilterScharrHorizGetBufferSize_8u16s_C1R(roi,&bufSize);
                     buffer.allocate(bufSize);

                     ippiFilterScharrHorizBorder_8u16s_C1R((const Ipp8u*)src.data, (int)src.step,
                        (Ipp16s*)dst.data, (int)dst.step, roi, ippBorderRepl, 0, (Ipp8u*)(char*)buffer);

                     return true;
                  }
               }
               // Intentional fall-through: an unmatched (dx,dy) pair lands in default.

               default:
                  return false;
            }
         }

      case CV_32F:
         {
            switch(dst.type())
            {
               case CV_32F:
               if((dx == 1) && (dy == 0))
               {
                  ippiFilterScharrVertGetBufferSize_32f_C1R(ippiSize(src.cols, src.rows),&bufSize);
                  buffer.allocate(bufSize);

                  ippiFilterScharrVertBorder_32f_C1R((const Ipp32f*)src.data, (int)src.step,
                     (Ipp32f*)dst.data, (int)dst.step, ippiSize(src.cols, src.rows),
                                            ippBorderRepl, 0, (Ipp8u*)(char*)buffer);
                  if(scale != 1)
                     /* IPP is fast, so MulC produce very little perf degradation */
                     ippiMulC_32f_C1IR((Ipp32f)scale, (Ipp32f*)dst.data, (int)dst.step, ippiSize(dst.cols*dst.channels(), dst.rows));

                  return true;
               }

               if((dx == 0) && (dy == 1))
               {
                  ippiFilterScharrHorizGetBufferSize_32f_C1R(ippiSize(src.cols, src.rows),&bufSize);
                  buffer.allocate(bufSize);

                  ippiFilterScharrHorizBorder_32f_C1R((const Ipp32f*)src.data, (int)src.step,
                     (Ipp32f*)dst.data, (int)dst.step, ippiSize(src.cols, src.rows),
                                            ippBorderRepl, 0, (Ipp8u*)(char*)buffer);
                  if(scale != 1)
                     ippiMulC_32f_C1IR((Ipp32f)scale, (Ipp32f *)dst.data, (int)dst.step, ippiSize(dst.cols*dst.channels(), dst.rows));

                  return true;
               }

               default:
                  return false;
            }
         }

      default:
         return false;
   }
}
Example #19
0
// Feeds one (color image, depth map) pair into the online capture pipeline:
// builds a point cloud with normals, masks the table/object, runs odometry
// against the previous frame, decides whether the frame is a keyframe, and
// watches for loop closure against the first keyframe once enough translation
// has accumulated.
// Returns a FramePushOutput whose frameState flags, pose and objectMask
// describe the result; an output with no state flags set means the frame was
// rejected (broken trajectory, closed loop, empty input, bad mask or odometry).
Ptr<OnlineCaptureServer::FramePushOutput> OnlineCaptureServer::push(const Mat& _image, const Mat& _depth, int frameID)
{
    Ptr<FramePushOutput> pushOutput = new FramePushOutput();

    // Pipeline must be initialized (but not finalized) and all helpers set.
    CV_Assert(isInitialied);
    CV_Assert(!isFinalized);

    CV_Assert(!normalsComputer.empty());
    CV_Assert(!tableMasker.empty());
    CV_Assert(!odometry.empty());

    // Once the trajectory breaks or the loop closes, all further frames are rejected.
    if(isTrajectoryBroken)
    {
        cout << "frame " << frameID << ": trajectory was broken starting from keyframe " << (*trajectoryFrames->frames.rbegin())->ID << "." << endl;
        return pushOutput;
    }

    if(isLoopClosed)
    {
        cout << "frame " << frameID << ": loop is already closed" << endl;
        return pushOutput;
    }

    if(_image.empty() || _depth.empty())
    {
        cout << "Warning: Empty frame " << frameID << endl;
        return pushOutput;
    }

    //color information is ingored now but can be used in future
    Mat _grayImage = _image;
    if (_image.channels() == 3)
        cvtColor(_image, _grayImage, CV_BGR2GRAY);

    CV_Assert(_grayImage.type() == CV_8UC1);
    CV_Assert(_depth.type() == CV_32FC1);
    CV_Assert(_grayImage.size() == _depth.size());

    // Denoise image and depth before geometry computations.
    Mat image, depth;
    filterImage(_grayImage, image);
    filterImage(_depth, depth);

    // Back-project depth to an organized 3D point cloud.
    Mat cloud;
    depthTo3d(depth, cameraMatrix, cloud);

    Mat normals = (*normalsComputer)(cloud);

    // Segment the table plane + object; objectMask is an extra output for the caller.
    Mat tableWithObjectMask;
    bool isTableMaskOk = (*tableMasker)(cloud, normals, tableWithObjectMask, &pushOutput->objectMask);
    pushOutput->frame = new OdometryFrame(_grayImage, _depth, tableWithObjectMask, normals, frameID);
    if(!isTableMaskOk)
    {
        cout << "Warning: bad table mask for the frame " << frameID << endl;
        return pushOutput;
    }

    //Ptr<OdometryFrameCache> currFrame = new OdometryFrameCache(image, depth, tableWithObjectMask);
    Ptr<OdometryFrame> currFrame = pushOutput->frame;

    if(lastKeyframe.empty())
    {
        // Very first frame: becomes the first keyframe with the identity pose.
        firstKeyframe = currFrame;
        pushOutput->frameState |= TrajectoryFrames::KEYFRAME;
        pushOutput->pose = Mat::eye(4,4,CV_64FC1);
        cout << "First keyframe ID " << frameID << endl;
    }
    else
    {
        // find frame to frame motion transformations
        {
            Mat Rt;
            cout << "odometry " << frameID << " -> " << prevFrameID << endl;
            if(odometry->compute(currFrame, prevFrame, Rt) && computeInliersRatio(currFrame, prevFrame, Rt, cameraMatrix, maxCorrespColorDiff,
                                                                                  maxCorrespDepthDiff) >= minInliersRatio)
            {
                pushOutput->frameState |= TrajectoryFrames::VALIDFRAME;
            }

            // NOTE(review): if odometry->compute() failed, Rt may still be empty
            // here, yet it is multiplied before the VALIDFRAME check below —
            // confirm compute() always fills Rt.
            pushOutput->pose = prevPose * Rt;
            if((pushOutput->frameState & TrajectoryFrames::VALIDFRAME) != TrajectoryFrames::VALIDFRAME)
            {
                cout << "Warning: Bad odometry (too far motion or low inliers ratio) " << frameID << "->" << prevFrameID << endl;
                return pushOutput;
            }
        }

        // check for the current frame: is it keyframe?
        {
            // Find the most recent keyframe pose to measure the motion since it.
            int lastKeyframePoseIndex = -1;
            for(int i = trajectoryFrames->frames.size() - 1; i >= 0; i--)
            {
                if((trajectoryFrames->frameStates[i] & TrajectoryFrames::KEYFRAME) == TrajectoryFrames::KEYFRAME)
                {
                    lastKeyframePoseIndex = i;
                    break;
                }
            }
            CV_Assert(lastKeyframePoseIndex >= 0);
            Mat Rt = (trajectoryFrames->poses[lastKeyframePoseIndex]).inv(DECOMP_SVD) * pushOutput->pose;
            float tnorm = tvecNorm(Rt);
            float rnorm = rvecNormDegrees(Rt);

            // Too large a jump since the last keyframe => trajectory is broken.
            if(tnorm > maxTranslationDiff || rnorm > maxRotationDiff)
            {
                cout << "Camera trajectory is broken (starting from " << (*trajectoryFrames->frames.rbegin())->ID << " frame)." << endl;
                cout << checkDataMessage << endl;
                isTrajectoryBroken = true;
                return pushOutput;
            }

            if((tnorm >= minTranslationDiff || rnorm >= minRotationDiff)) // we don't check inliers ratio here because it was done by frame-to-frame above
            {
                translationSum += tnorm;
                if(isLoopClosing)
                    cout << "possible ";
                cout << "keyframe ID " << frameID << endl;
                pushOutput->frameState |= TrajectoryFrames::KEYFRAME;
            }
        }

        // match with the first keyframe
        if(translationSum > skippedTranslation) // ready for closure
        {
            Mat Rt;
            if(odometry->compute(currFrame, firstKeyframe, Rt))
            {
                // we check inliers ratio for the loop closure frames because we didn't do this before
                float inliersRatio = computeInliersRatio(currFrame, firstKeyframe, Rt, cameraMatrix, maxCorrespColorDiff, maxCorrespDepthDiff);
                if(inliersRatio > minInliersRatio)
                {
                    // Track the best closure candidate; once the ratio stops
                    // improving (or matching fails), declare the loop closed.
                    if(inliersRatio >= closureInliersRatio)
                    {
                        isLoopClosing = true;
                        closureInliersRatio = inliersRatio;
                        closureFrame = currFrame;
                        closureFrameID = frameID;
                        closurePoseWithFirst = Rt;
                        closurePose = pushOutput->pose;
                        closureObjectMask = pushOutput->objectMask;
                        closureBgrImage = _image;
                        isClosureFrameKey = (pushOutput->frameState & TrajectoryFrames::KEYFRAME) == TrajectoryFrames::KEYFRAME;
                    }
                    else if(isLoopClosing)
                    {
                        isLoopClosed = true;
                    }
                }
                else if(isLoopClosing)
                {
                    isLoopClosed = true;
                }
            }
            else if(isLoopClosing)
            {
                isLoopClosed = true;
            }
        }
    }

    // Record the frame in the trajectory if its state matches the requested filter.
    if((pushOutput->frameState & trajectoryFrames->resumeFrameState) == trajectoryFrames->resumeFrameState)
    {
        trajectoryFrames->push(new RgbdFrame(_image, _depth, currFrame->mask, currFrame->normals, frameID),
                               pushOutput->pose, pushOutput->objectMask, pushOutput->frameState);

        if((pushOutput->frameState & TrajectoryFrames::KEYFRAME) == TrajectoryFrames::KEYFRAME)
            lastKeyframe = currFrame;
    }

    // Remember this frame as the reference for the next odometry step.
    prevFrame = currFrame;
    prevFrameID = frameID;
    prevPose = pushOutput->pose.clone();

    return pushOutput;
}
Example #20
0
// IPP fast path for Sobel-family derivatives of `src` into `dst`.
// Handles first (dx+dy==1) and pure second (dx or dy == 2) derivatives for
// ksize 3 or 5, single channel: 8u -> 16s (only when scale == 1) and
// 32f -> 32f (scale applied afterwards with ippiMulC). A non-positive ksize
// (the Scharr aperture) is delegated to IPPDerivScharr. Returns false when
// the combination is unsupported so the caller can use the generic pipeline.
static bool IPPDeriv(const Mat& src, Mat& dst, int ddepth, int dx, int dy, int ksize, double scale)
{
   int bufSize = 0;
   cv::AutoBuffer<char> buffer;   // scratch buffer required by the IPP border filters

   if(ksize == 3 || ksize == 5)
   {
      if( ddepth < 0 )
          ddepth = src.depth();

      // 8-bit source, 16-bit signed destination (no scaling variant in IPP).
      if(src.type() == CV_8U && dst.type() == CV_16S && scale == 1)
      {
         // ksize*10+ksize encodes the IPP mask id (33 -> 3x3, 55 -> 5x5).
         if((dx == 1) && (dy == 0))
         {
            ippiFilterSobelNegVertGetBufferSize_8u16s_C1R(ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),&bufSize);
            buffer.allocate(bufSize);

            ippiFilterSobelNegVertBorder_8u16s_C1R((const Ipp8u*)src.data, (int)src.step,
               (Ipp16s*)dst.data, (int)dst.step, ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),
                                      ippBorderRepl, 0, (Ipp8u*)(char*)buffer);
            return true;
         }

         if((dx == 0) && (dy == 1))
         {
            ippiFilterSobelHorizGetBufferSize_8u16s_C1R(ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),&bufSize);
            buffer.allocate(bufSize);

            ippiFilterSobelHorizBorder_8u16s_C1R((const Ipp8u*)src.data, (int)src.step,
               (Ipp16s*)dst.data, (int)dst.step, ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),
                                      ippBorderRepl, 0, (Ipp8u*)(char*)buffer);

            return true;
         }

         if((dx == 2) && (dy == 0))
         {
            ippiFilterSobelVertSecondGetBufferSize_8u16s_C1R(ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),&bufSize);
            buffer.allocate(bufSize);

            ippiFilterSobelVertSecondBorder_8u16s_C1R((const Ipp8u*)src.data, (int)src.step,
               (Ipp16s*)dst.data, (int)dst.step, ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),
                                      ippBorderRepl, 0, (Ipp8u*)(char*)buffer);

            return true;
         }

         if((dx == 0) && (dy == 2))
         {
            ippiFilterSobelHorizSecondGetBufferSize_8u16s_C1R(ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),&bufSize);
            buffer.allocate(bufSize);

            ippiFilterSobelHorizSecondBorder_8u16s_C1R((const Ipp8u*)src.data, (int)src.step,
               (Ipp16s*)dst.data, (int)dst.step, ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),
                                      ippBorderRepl, 0, (Ipp8u*)(char*)buffer);

            return true;
         }
      }

      // Float source and destination; arbitrary scale supported via post-multiply.
      if(src.type() == CV_32F && dst.type() == CV_32F)
      {
         if((dx == 1) && (dy == 0))
         {
            ippiFilterSobelNegVertGetBufferSize_32f_C1R(ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize), &bufSize);
            buffer.allocate(bufSize);

            ippiFilterSobelNegVertBorder_32f_C1R((const Ipp32f*)src.data, (int)src.step,
               (Ipp32f*)dst.data, (int)dst.step, ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),
                                      ippBorderRepl, 0, (Ipp8u*)(char*)buffer);
            if(scale != 1)
               ippiMulC_32f_C1IR((Ipp32f)scale, (Ipp32f *)dst.data, (int)dst.step, ippiSize(dst.cols*dst.channels(), dst.rows));

            return true;
         }

         if((dx == 0) && (dy == 1))
         {
            ippiFilterSobelHorizGetBufferSize_32f_C1R(ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),&bufSize);
            buffer.allocate(bufSize);

            ippiFilterSobelHorizBorder_32f_C1R((const Ipp32f*)src.data, (int)src.step,
               (Ipp32f*)dst.data, (int)dst.step, ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),
                                      ippBorderRepl, 0, (Ipp8u*)(char*)buffer);
            if(scale != 1)
               ippiMulC_32f_C1IR((Ipp32f)scale, (Ipp32f *)dst.data, (int)dst.step, ippiSize(dst.cols*dst.channels(), dst.rows));

            return true;
         }

         if((dx == 2) && (dy == 0))
         {
            ippiFilterSobelVertSecondGetBufferSize_32f_C1R(ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),&bufSize);
            buffer.allocate(bufSize);

            ippiFilterSobelVertSecondBorder_32f_C1R((const Ipp32f*)src.data, (int)src.step,
               (Ipp32f*)dst.data, (int)dst.step, ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),
                                      ippBorderRepl, 0, (Ipp8u*)(char*)buffer);
            if(scale != 1)
               ippiMulC_32f_C1IR((Ipp32f)scale, (Ipp32f *)dst.data, (int)dst.step, ippiSize(dst.cols*dst.channels(), dst.rows));

            return true;
         }

         if((dx == 0) && (dy == 2))
         {
            ippiFilterSobelHorizSecondGetBufferSize_32f_C1R(ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),&bufSize);
            buffer.allocate(bufSize);

            ippiFilterSobelHorizSecondBorder_32f_C1R((const Ipp32f*)src.data, (int)src.step,
               (Ipp32f*)dst.data, (int)dst.step, ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),
                                      ippBorderRepl, 0, (Ipp8u*)(char*)buffer);
            if(scale != 1)
               ippiMulC_32f_C1IR((Ipp32f)scale, (Ipp32f *)dst.data, (int)dst.step, ippiSize(dst.cols*dst.channels(), dst.rows));

            return true;
         }
      }
   }

   // Non-positive ksize selects the Scharr aperture.
   if(ksize <= 0)
      return IPPDerivScharr(src, dst, ddepth, dx, dy, scale);

   return false;
}
Example #21
0
/**
 * @brief Contrast::glg — Gray-Level Grouping contrast enhancement.
 * @param img_bgr image to enhance; 3-channel BGR or single-channel grayscale.
 *
 * Implementation of the paper
 * https://ieeexplore.ieee.org/abstract/document/1658093.
 *
 * Works by creating the overall histogram of the image, merging the lesser
 * impactful gray levels into neighbouring groups until ~40 groups remain
 * (see the merge loop below; the exact target count is tunable), and then
 * remapping every pixel through the resulting lookup table. For BGR input
 * the enhancement is applied to the V channel of the HSV representation.
 */
void Contrast::glg(Mat img_bgr) {
  int s, n = 0, i, j;

  // calcHist produces CV_32F bins, so the histograms must be read (and, for
  // hist_glg, allocated) as float — the previous CV_8UC1 allocation combined
  // with .at<int>() reads accessed the wrong element type.
  Mat hist;
  Mat hist_glg = Mat::zeros(Size(256, 1), CV_32FC1);

  Mat gray = Mat(Size(img_bgr.cols, img_bgr.rows), CV_8UC1);
  Mat img_hsv = Mat(Size(img_bgr.cols, img_bgr.rows), CV_8UC3);

  /*
   * If the source image has three channels, extract the third HSV channel
   * (the Value channel) to enhance; else use the source image directly.
   * */
  if (img_bgr.channels() == 3) {
    cvtColor(img_bgr, img_hsv, CV_BGR2HSV);
    int from_to[] = {2, 0};  // HSV channel 2 (V) -> gray channel 0
    Mat out[] = {gray};
    mixChannels(&img_hsv, 1, out, 1, from_to, 1);
  } else {
    gray = img_bgr.clone();
  }

  int width = gray.cols;
  int height = gray.rows;
  int step = gray.step;

  uchar *data = gray.data;

  // calcHist ranges are [lower, upper): upper bound 256 so that pixel value
  // 255 is counted (255 would exclude it).
  const float range[] = {0, 256};
  const float *ranges = {range};
  double max_value = 0, min_value = 0;
  float left[256], right[256], T[256];  // group boundaries and transfer function
  const int channels = 0;
  const int hist_size = 256;
  float N = 0;  // filled by lookUpTable()

  /* Histogram of intensities in the image (grayscale / value channel). */
  calcHist(&gray, 1, &channels, noArray(), hist, 1, &hist_size, &ranges);

  cout << hist.rows << ":" << hist.cols << ":" << hist.channels() << ":"
       << hist.depth() << ":" << hist.type() << endl;

  minMaxIdx(hist, &min_value, &max_value, 0, 0);

  cout << "Hist calcucated: " << endl;

  // Every occupied gray level becomes its own initial group [left, right].
  for (s = 0; s < hist_size; s++) {
    int bin_val = cvRound(hist.at<float>(s));  // CV_32F bins

    if (bin_val != 0) {
      left[n] = s;
      right[n] = s;
      n++;
    }
  }

  cout << "Hist traversed " << endl;
  // Merge the least significant groups until about 40 remain.
  // NOTE(review): the paper/header mentions ~20 buckets while this keeps 40 —
  // confirm the intended target count.
  for (i = n - 40; i > 0; i--) {
    mergeLevels(hist_glg, &max_value, &n, left, right);
  }

  cout << "Hist merged " << endl;

  lookUpTable(&N, left, right, &n, T);

  cout << "Hist Looked " << endl;
  // Remap every pixel through the transfer function T (clamped to [0,255]).
  for (i = 0; i < height; i++) {
    for (j = 0; j < width; j++) {
      data[i * step + j] = saturate_cast<uchar>(T[data[i * step + j]]);
    }
  }

  cout << "Hist data " << endl;

  if (img_bgr.channels() == 3) {
    int from_to[] = {0, 2};  // gray channel 0 -> HSV channel 2 (V)
    Mat out[] = {img_hsv};
    mixChannels(&gray, 1, out, 1, from_to, 1);
    cvtColor(img_hsv, img_bgr, CV_HSV2BGR, 0);
  } else {
    // Write the result back into the caller-shared pixel buffer (assigning a
    // clone to the by-value `img_bgr` only rebound the local header and lost
    // the result for grayscale input, unlike the 3-channel path above).
    gray.copyTo(img_bgr);
  }
}
Example #22
0
// cv::Laplacian: computes the Laplacian of _src into _dst with aperture
// `ksize`, optional result scaling and delta offset.
// ksize 1/3 are handled with a single fixed 3x3 kernel via filter2D; larger
// apertures are computed as d2x + d2y using separable second-derivative Sobel
// kernels, processed in horizontal stripes to bound intermediate memory.
void cv::Laplacian( InputArray _src, OutputArray _dst, int ddepth, int ksize,
                    double scale, double delta, int borderType )
{
    int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype);
    if (ddepth < 0)
        ddepth = sdepth;
    _dst.create( _src.size(), CV_MAKETYPE(ddepth, cn) );

#ifdef HAVE_TEGRA_OPTIMIZATION
    // Tegra-accelerated variants only support the default scale/delta.
    if (scale == 1.0 && delta == 0)
    {
        Mat src = _src.getMat(), dst = _dst.getMat();
        if (ksize == 1 && tegra::laplace1(src, dst, borderType))
            return;
        if (ksize == 3 && tegra::laplace3(src, dst, borderType))
            return;
        if (ksize == 5 && tegra::laplace5(src, dst, borderType))
            return;
    }
#endif

    if( ksize == 1 || ksize == 3 )
    {
        // K[0]: 4-neighbour Laplacian (ksize 1); K[1]: diagonal variant (ksize 3).
        float K[2][9] =
        {
            { 0, 1, 0, 1, -4, 1, 0, 1, 0 },
            { 2, 0, 2, 0, -8, 0, 2, 0, 2 }
        };
        Mat kernel(3, 3, CV_32F, K[ksize == 3]);
        if( scale != 1 )
            kernel *= scale;
        filter2D( _src, _dst, ddepth, kernel, Point(-1, -1), delta, borderType );
    }
    else
    {
        Mat src = _src.getMat(), dst = _dst.getMat();
        // Target working-set size per stripe (bytes).
        const size_t STRIPE_SIZE = 1 << 14;

        int depth = src.depth();
        int ktype = std::max(CV_32F, std::max(ddepth, depth));
        // Intermediate depth: 16s is enough for small 8u kernels, else float/double.
        int wdepth = depth == CV_8U && ksize <= 5 ? CV_16S : depth <= CV_32F ? CV_32F : CV_64F;
        int wtype = CV_MAKETYPE(wdepth, src.channels());
        Mat kd, ks;
        getSobelKernels( kd, ks, 2, 0, ksize, false, ktype );
        if( ddepth < 0 )
            ddepth = src.depth();
        int dtype = CV_MAKETYPE(ddepth, src.channels());

        // Rows per stripe, clamped to [1, src.rows].
        int dy0 = std::min(std::max((int)(STRIPE_SIZE/(getElemSize(src.type())*src.cols)), 1), src.rows);
        // fx computes d2/dx2 (kd horizontally, ks vertically); fy the transpose.
        Ptr<FilterEngine> fx = createSeparableLinearFilter(src.type(),
            wtype, kd, ks, Point(-1,-1), 0, borderType, borderType, Scalar() );
        Ptr<FilterEngine> fy = createSeparableLinearFilter(src.type(),
            wtype, ks, kd, Point(-1,-1), 0, borderType, borderType, Scalar() );

        int y = fx->start(src), dsty = 0, dy = 0;
        fy->start(src);
        const uchar* sptr = src.data + y*src.step;

        // Stripe buffers, tall enough to cover the kernel's vertical extent.
        Mat d2x( dy0 + kd.rows - 1, src.cols, wtype );
        Mat d2y( dy0 + kd.rows - 1, src.cols, wtype );

        for( ; dsty < src.rows; sptr += dy0*src.step, dsty += dy )
        {
            fx->proceed( sptr, (int)src.step, dy0, d2x.data, (int)d2x.step );
            dy = fy->proceed( sptr, (int)src.step, dy0, d2y.data, (int)d2y.step );
            if( dy > 0 )
            {
                // Laplacian = d2x + d2y, converted (with scale/delta) into dst.
                Mat dstripe = dst.rowRange(dsty, dsty + dy);
                d2x.rows = d2y.rows = dy; // modify the headers, which should work
                d2x += d2y;
                d2x.convertTo( dstripe, dtype, scale, delta );
            }
        }
    }
}
Example #23
0
// Per-pixel alpha blend:  dest = alpha*src1 + (1-alpha)*src2.
// 'alpha' is the weight of src1; it may be CV_8U (interpreted as 0..255 -> 0..1)
// or already floating point. If src1/src2 have different channel counts, the
// single-channel one is promoted to BGR so both match.
// NOTE(review): the SIMD loops below use _mm_load_ps/_mm_store_ps (16-byte
// aligned access) on Mat data and walk the whole image as one flat array —
// assumes continuous, 16-byte-aligned Mats (no ROI/submatrix); verify at call sites.
void alphaBlend(const Mat& src1, const Mat& src2, const Mat& alpha, Mat& dest)
{
	int T;
	Mat s1,s2;
	// Output takes the type of whichever source has more channels.
	if(src1.channels()<=src2.channels())T=src2.type();
	else T=src1.type();
	if(dest.empty()) dest=Mat::zeros(src1.size(),T);
	if(dest.type()!=T)dest=Mat::zeros(src1.size(),T);
	// Promote the narrower input to 3 channels so s1/s2 agree.
	if(src1.channels()==src2.channels())
	{
		s1=src1;
		s2=src2;
	}
	else if(src2.channels()==3)
	{
		cvtColor(src1,s1,CV_GRAY2BGR);
		s2=src2;
	}
	else
	{
		cvtColor(src2,s2,CV_GRAY2BGR);
		s1=src1;
	}
	Mat a;
	// Fast path: 8-bit alpha with single-channel inputs goes to a dedicated
	// SSE routine operating on the ORIGINAL (un-promoted) sources.
	if(alpha.depth()==CV_8U && s1.channels()==1)
	{
		//alpha.convertTo(a,CV_32F,1.0/255.0);
		alphaBlendSSE_8u(src1,src2,alpha,dest);
		return ;
	}
	else if(alpha.depth()==CV_8U)
	{
		// Normalize 8-bit alpha to [0,1] floats.
		alpha.convertTo(a,CV_32F,1.0/255);
	}
	else
	{
		// Alpha is already floating point; use it as-is.
		alpha.copyTo(a);
	}

	if(dest.channels()==3)
	{
		// Blend each BGR plane independently in 32-bit float precision.
		vector<Mat> ss1(3),ss2(3);
		vector<Mat> ss1f(3),ss2f(3);
		split(s1,ss1);
		split(s2,ss2);
		for(int c=0;c<3;c++)
		{
			ss1[c].convertTo(ss1f[c],CV_32F);
			ss2[c].convertTo(ss2f[c],CV_32F);
		}
		{
			float* s1r = ss1f[0].ptr<float>(0);
			float* s2r = ss2f[0].ptr<float>(0);

			float* s1g = ss1f[1].ptr<float>(0);
			float* s2g = ss2f[1].ptr<float>(0);

			float* s1b = ss1f[2].ptr<float>(0);
			float* s2b = ss2f[2].ptr<float>(0);


			float* al = a.ptr<float>(0);
			// Process 4 pixels per SSE iteration; 'sizeRem' pixels remain for
			// the scalar tail loop below.
			const int size = src1.size().area()/4;
			const int sizeRem = src1.size().area()-size*4;

			const __m128 ones = _mm_set1_ps(1.0f);

			for(int i=size;i--;)
			{
				// msa = alpha, imsa = 1-alpha; blend all three planes in place.
				const __m128 msa = _mm_load_ps(al);
				const __m128 imsa = _mm_sub_ps(ones,msa);
				__m128 ms1 = _mm_load_ps(s1r);
				__m128 ms2 = _mm_load_ps(s2r);
				ms1 = _mm_mul_ps(ms1,msa);
				ms2 = _mm_mul_ps(ms2,imsa);
				ms1 = _mm_add_ps(ms1,ms2);
				_mm_store_ps(s1r,ms1);//store ss1f

				ms1 = _mm_load_ps(s1g);
				ms2 = _mm_load_ps(s2g);
				ms1 = _mm_mul_ps(ms1,msa);
				ms2 = _mm_mul_ps(ms2,imsa);
				ms1 = _mm_add_ps(ms1,ms2);
				_mm_store_ps(s1g,ms1);//store ss1f

				ms1 = _mm_load_ps(s1b);
				ms2 = _mm_load_ps(s2b);
				ms1 = _mm_mul_ps(ms1,msa);
				ms2 = _mm_mul_ps(ms2,imsa);
				ms1 = _mm_add_ps(ms1,ms2);
				_mm_store_ps(s1b,ms1);//store ss1f

				al+=4,s1r+=4,s2r+=4,s1g+=4,s2g+=4,s1b+=4,s2b+=4;
			}
			// Scalar tail: remaining 0..3 pixels.
			for(int i=0;i<sizeRem;i++)
			{
				*s1r= *al * *s1r +(1.f-*al) * *s2r;
				*s1g= *al * *s1g +(1.f-*al) * *s2g;
				*s1b= *al * *s1b +(1.f-*al) * *s2b;

				al++,s1r++,s2r++,s1g++,s2g++,s1b++,s2b++;
			}
			for(int c=0;c<3;c++)
			{
				// NOTE(review): the result is always converted to 8-bit here,
				// even though 'dest' was allocated with type T above — this is
				// inconsistent with the single-channel branch below, which
				// honors src1.depth(). Confirm intent for float destinations.
				ss1f[c].convertTo(ss1[c],CV_8U);
			}
			merge(ss1,dest);
		}
	}
	else if(dest.channels()==1)
	{
		// Single-channel blend, same SSE scheme on one plane.
		Mat ss1f,ss2f;
		s1.convertTo(ss1f,CV_32F);
		s2.convertTo(ss2f,CV_32F);
		{
			float* s1r = ss1f.ptr<float>(0);
			float* s2r = ss2f.ptr<float>(0);
			float* al = a.ptr<float>(0);
			const int size = src1.size().area()/4;
			const int nn = src1.size().area() - size*4;
			const __m128 ones = _mm_set1_ps(1.0f);
			for(int i=size;i--;)
			{
				const __m128 msa = _mm_load_ps(al);
				const __m128 imsa = _mm_sub_ps(ones,msa);
				__m128 ms1 = _mm_load_ps(s1r);
				__m128 ms2 = _mm_load_ps(s2r);
				ms1 = _mm_mul_ps(ms1,msa);
				ms2 = _mm_mul_ps(ms2,imsa);
				ms1 = _mm_add_ps(ms1,ms2);
				_mm_store_ps(s1r,ms1);//store ss1f

				al+=4,s1r+=4,s2r+=4;
			}
			// Scalar tail for the last 0..3 pixels.
			for(int i=nn;i--;)
			{
				*s1r = *al * *s1r + (1.0f-*al)* *s2r;
				al++,s1r++,s2r++;
			}
			// Preserve the source depth in the output (copy if already float).
			if(src1.depth()==CV_32F)
				ss1f.copyTo(dest);
			else
				ss1f.convertTo(dest,src1.depth());
		}
	}
}
Example #24
0
// Create a grayscale face image that has a standard size and contrast & brightness.
// "srcImg" should be a copy of the whole color camera frame, so that it can draw the eye positions onto.
// If 'doLeftAndRightSeparately' is true, it will process left & right sides separately,
// so that if there is a strong light on one side but not the other, it will still look OK.
// Performs Face Preprocessing as a combination of:
//  - geometrical scaling, rotation and translation using Eye Detection,
//  - smoothing away image noise using a Bilateral Filter,
//  - standardize the brightness on both left and right sides of the face independently using separated Histogram Equalization,
//  - removal of background and hair using an Elliptical Mask.
// Returns either a preprocessed face square image or an empty Mat (ie: couldn't detect the face and 2 eyes).
// If a face is found, it can store the rect coordinates into 'storeFaceRect' and 'storeLeftEye' & 'storeRightEye' if given,
// and eye search regions into 'searchedLeftEye' & 'searchedRightEye' if given.
Mat getPreprocessedFace(Mat &srcImg, int desiredFaceWidth, CascadeClassifier &faceCascade, CascadeClassifier &eyeCascade1, CascadeClassifier &eyeCascade2, bool doLeftAndRightSeparately, Rect *storeFaceRect, Point *storeLeftEye, Point *storeRightEye, Rect *searchedLeftEye, Rect *searchedRightEye)
{
    // Use square faces.
    int desiredFaceHeight = desiredFaceWidth;

    // Mark the detected face region and eye search regions as invalid, in case they aren't detected.
    if (storeFaceRect)
        storeFaceRect->width = -1;
    if (storeLeftEye)
        storeLeftEye->x = -1;
    if (storeRightEye)
        storeRightEye->x = -1;
    if (searchedLeftEye)
        searchedLeftEye->width = -1;
    if (searchedRightEye)
        searchedRightEye->width = -1;

    // Find the largest face.
    Rect faceRect;
    detectLargestObject(srcImg, faceCascade, faceRect);

    // Check if a face was detected.
    if (faceRect.width > 0) {

        // Give the face rect to the caller if desired.
        if (storeFaceRect)
            *storeFaceRect = faceRect;

        Mat faceImg = srcImg(faceRect);    // Get the detected face image.

        // If the input image is not grayscale, then convert the BGR or BGRA color image to grayscale.
        Mat gray;
        if (faceImg.channels() == 3) {
            cvtColor(faceImg, gray, CV_BGR2GRAY);
        }
        else if (faceImg.channels() == 4) {
            cvtColor(faceImg, gray, CV_BGRA2GRAY);
        }
        else {
            // Access the input image directly, since it is already grayscale.
            gray = faceImg;
        }

        // Search for the 2 eyes at the full resolution, since eye detection needs max resolution possible!
        Point leftEye, rightEye;
        detectBothEyes(gray, eyeCascade1, eyeCascade2, leftEye, rightEye, searchedLeftEye, searchedRightEye);

        // Give the eye results to the caller if desired.
        if (storeLeftEye)
            *storeLeftEye = leftEye;
        if (storeRightEye)
            *storeRightEye = rightEye;

        // Check if both eyes were detected.
        if (leftEye.x >= 0 && rightEye.x >= 0) {

            // Since we found both eyes, lets rotate & scale & translate the face so that the 2 eyes
            // line up perfectly with ideal eye positions. This makes sure that eyes will be horizontal,
            // and not too far left or right of the face, etc.

            // Get the center between the 2 eyes.
            Point2f eyesCenter = Point2f( (leftEye.x + rightEye.x) * 0.5f, (leftEye.y + rightEye.y) * 0.5f );
            // Get the distance and angle between the 2 eyes.
            double dy = (rightEye.y - leftEye.y);
            double dx = (rightEye.x - leftEye.x);
            double len = sqrt(dx*dx + dy*dy);

            // BUGFIX: guard against a degenerate detection where both eye centers
            // coincide (len == 0). Without this check 'scale' below becomes
            // infinite and warpAffine produces a garbage image. Treat it the
            // same as "eyes not found" and fall through to return an empty Mat.
            if (len > 0.0) {
                double angle = atan2(dy, dx) * 180.0/CV_PI; // Convert from radians to degrees.

                // Hand measurements shown that the left eye center should ideally be at roughly (0.19, 0.14) of a scaled face image.
                const double DESIRED_RIGHT_EYE_X = (1.0f - DESIRED_LEFT_EYE_X);
                // Get the amount we need to scale the image to be the desired fixed size we want.
                double desiredLen = (DESIRED_RIGHT_EYE_X - DESIRED_LEFT_EYE_X) * desiredFaceWidth;
                double scale = desiredLen / len;
                // Get the transformation matrix for rotating and scaling the face to the desired angle & size.
                Mat rot_mat = getRotationMatrix2D(eyesCenter, angle, scale);
                // Shift the center of the eyes to be the desired center between the eyes.
                rot_mat.at<double>(0, 2) += desiredFaceWidth * 0.5f - eyesCenter.x;
                rot_mat.at<double>(1, 2) += desiredFaceHeight * DESIRED_LEFT_EYE_Y - eyesCenter.y;

                // Rotate and scale and translate the image to the desired angle & size & position!
                // Start from uniform grey so untouched border pixels have a neutral value.
                Mat warped = Mat(desiredFaceHeight, desiredFaceWidth, CV_8U, Scalar(128));
                warpAffine(gray, warped, rot_mat, warped.size());

                // Give the image a standard brightness and contrast, in case it was too dark or had low contrast.
                if (!doLeftAndRightSeparately) {
                    // Do it on the whole face.
                    equalizeHist(warped, warped);
                }
                else {
                    // Do it separately for the left and right sides of the face.
                    equalizeLeftAndRightHalves(warped);
                }

                // Use the "Bilateral Filter" to reduce pixel noise by smoothing the image, but keeping the sharp edges in the face.
                Mat filtered = Mat(warped.size(), CV_8U);
                bilateralFilter(warped, filtered, 0, 20.0, 2.0);

                // Filter out the corners of the face, since we mainly just care about the middle parts.
                // Draw a filled ellipse in the middle of the face-sized image.
                Mat mask = Mat(warped.size(), CV_8U, Scalar(0)); // Start with an empty mask.
                Point faceCenter = Point( desiredFaceWidth/2, cvRound(desiredFaceHeight * FACE_ELLIPSE_CY) );
                Size size = Size( cvRound(desiredFaceWidth * FACE_ELLIPSE_W), cvRound(desiredFaceHeight * FACE_ELLIPSE_H) );
                ellipse(mask, faceCenter, size, 0, 0, 360, Scalar(255), CV_FILLED);

                // Apply the elliptical mask on the face: start from uniform grey,
                // then copy only the non-masked (elliptical) face region.
                Mat dstImg = Mat(warped.size(), CV_8U, Scalar(128));
                filtered.copyTo(dstImg, mask);  // Copies non-masked pixels from filtered to dstImg.

                return dstImg;
            }
        }
    }
    // No face, or no usable pair of eyes: report failure with an empty image.
    return Mat();
}
Example #25
0
// Decode the PNG pixel data into 'img', configuring libpng transforms so the
// output matches img's depth and channel count (16->8 strip, palette->RGB,
// gray expansion, RGB<->BGR/gray conversion, alpha stripping).
// Returns false if the libpng state is missing or decoding fails.
// NOTE(review): m_png_ptr/m_info_ptr/m_end_info/m_width/m_height are presumably
// set up by a prior header-reading step — confirm against the class's readHeader.
bool  PngDecoder::readData( Mat& img )
{
    volatile bool result = false;   // volatile: value must survive longjmp from libpng error handler
    AutoBuffer<uchar*> _buffer(m_height);   // one row pointer per destination row
    uchar** buffer = _buffer;
    int color = img.channels() > 1;

    if( m_png_ptr && m_info_ptr && m_end_info && m_width && m_height )
    {
        png_structp png_ptr = (png_structp)m_png_ptr;
        png_infop info_ptr = (png_infop)m_info_ptr;
        png_infop end_info = (png_infop)m_end_info;

        // libpng reports errors via longjmp back to this point (non-zero return).
        if( setjmp( png_jmpbuf ( png_ptr ) ) == 0 )
        {
            int y;

            // Reduce 16-bit samples to 8-bit if the destination is CV_8U;
            // otherwise byte-swap 16-bit samples on little-endian hosts
            // (PNG stores samples big-endian).
            if( img.depth() == CV_8U && m_bit_depth == 16 )
                png_set_strip_16( png_ptr );
            else if( !isBigEndian() )
                png_set_swap( png_ptr );

            if(img.channels() < 4)
            {
                /* observation: png_read_image() writes 400 bytes beyond
                 * end of data when reading a 400x118 color png
                 * "mpplus_sand.png".  OpenCV crashes even with demo
                 * programs.  Looking at the loaded image I'd say we get 4
                 * bytes per pixel instead of 3 bytes per pixel.  Test
                 * indicate that it is a good idea to always ask for
                 * stripping alpha..  18.11.2004 Axel Walthelm
                 */
                 png_set_strip_alpha( png_ptr );
            }

            // Expand palette images to full RGB.
            if( m_color_type == PNG_COLOR_TYPE_PALETTE )
                png_set_palette_to_rgb( png_ptr );

            // Expand 1/2/4-bit grayscale to 8-bit (API name changed in libpng 1.2.9).
            if( m_color_type == PNG_COLOR_TYPE_GRAY && m_bit_depth < 8 )
#if (PNG_LIBPNG_VER_MAJOR*10000 + PNG_LIBPNG_VER_MINOR*100 + PNG_LIBPNG_VER_RELEASE >= 10209) || \
    (PNG_LIBPNG_VER_MAJOR == 1 && PNG_LIBPNG_VER_MINOR == 0 && PNG_LIBPNG_VER_RELEASE >= 18)
                png_set_expand_gray_1_2_4_to_8( png_ptr );
#else
                png_set_gray_1_2_4_to_8( png_ptr );
#endif

            // Match channel order/count to the destination Mat.
            if( CV_MAT_CN(m_type) > 1 && color )
                png_set_bgr( png_ptr ); // convert RGB to BGR
            else if( color )
                png_set_gray_to_rgb( png_ptr ); // Gray->RGB
            else
                png_set_rgb_to_gray( png_ptr, 1, 0.299, 0.587 ); // RGB->Gray

            png_set_interlace_handling( png_ptr );
            png_read_update_info( png_ptr, info_ptr );

            // Point libpng directly at the destination rows (no intermediate copy).
            for( y = 0; y < m_height; y++ )
                buffer[y] = img.data + y*img.step;

            png_read_image( png_ptr, buffer );
            png_read_end( png_ptr, end_info );

            result = true;
        }
    }

    close();
    return result;
}
// Search for objects such as faces in the image using the given parameters, storing the multiple cv::Rects into 'objects'.
// Can use Haar cascades or LBP cascades for Face Detection, or even eye, mouth, or car detection.
// Input is temporarily shrunk to 'scaledWidth' for much faster detection, since 200 is enough to find faces.
void detectObjectsCustom(const Mat &img, CascadeClassifier &cascade, vector<Rect> &objects, int scaledWidth, int flags, Size minFeatureSize, float searchScaleFactor, int minNeighbors)
{
    //imshow("img", img);

    // If the input image is not grayscale, then convert the BGR or BGRA color image to grayscale.
    Mat gray;
    if (img.channels() == 3) {
        cvtColor(img, gray, CV_BGR2GRAY);
    }
    else if (img.channels() == 4) {
        cvtColor(img, gray, CV_BGRA2GRAY);
    }
    else {
        // Access the input image directly, since it is already grayscale.
        gray = img;
    }
    //imshow("gray", gray);

    // Possibly shrink the image, to run much faster.
    Mat inputImg;
    float scale = img.cols / (float)scaledWidth;
    if (img.cols > scaledWidth) {
        // Shrink the image while keeping the same aspect ratio.
        int scaledHeight = cvRound(img.rows / scale);
        resize(gray, inputImg, Size(scaledWidth, scaledHeight));
    }
    else {
        // Access the input image directly, since it is already small.
        inputImg = gray;
    }

    //imshow("inputImg", inputImg);

    // Standardize the brightness and contrast to improve dark images.
    Mat equalizedImg;
    equalizeHist(inputImg, equalizedImg);
    //imshow("equalizedImg", equalizedImg);

    // Detect objects in the small grayscale image.
    cascade.detectMultiScale(equalizedImg, objects, searchScaleFactor, minNeighbors, flags, minFeatureSize);

    // Enlarge the results if the image was temporarily shrunk before detection.
    if (img.cols > scaledWidth) {
        for (int i = 0; i < (int)objects.size(); i++ ) {
            objects[i].x = cvRound(objects[i].x * scale);
            objects[i].y = cvRound(objects[i].y * scale);
            objects[i].width = cvRound(objects[i].width * scale);
            objects[i].height = cvRound(objects[i].height * scale);
        }
    }

    // Make sure the object is completely within the image, in case it was on a border.
    for (int i = 0; i < (int)objects.size(); i++ ) {
        if (objects[i].x < 0)
            objects[i].x = 0;
        if (objects[i].y < 0)
            objects[i].y = 0;
        if (objects[i].x + objects[i].width > img.cols)
            objects[i].x = img.cols - objects[i].width;
        if (objects[i].y + objects[i].height > img.rows)
            objects[i].y = img.rows - objects[i].height;
    }

    // Return with the detected face rectangles stored in "objects".
}
// SOAP endpoint: load a Mat from its binary file and report its geometry,
// depth, type, channel count and emptiness through 'detail'.
// Returns SOAP_OK, or a receiver fault if the file cannot be read.
int ns__getMatDetail(  struct soap *soap, 
			std::string InputMatFilename,
			ns__MatDetail &detail)
{
	bool timeChecking, memoryChecking;
	getConfig(timeChecking, memoryChecking);
	if(timeChecking)
		start = omp_get_wtime();

	// Load the matrix from its binary container.
	Mat src;
	if(!readMat(InputMatFilename, src))
	{
		Log(logERROR) << "getMatDetail :: can not read bin file for src" << std::endl;
		return soap_receiver_fault(soap, "getMatDetail :: can not read bin file for src", NULL);
	}

	detail.columns = src.cols;
	detail.rows = src.rows;

	// Map the element depth to its symbolic OpenCV name.
	// (CV_8U..CV_64F cover 8/16/32-bit integers and 32/64-bit floats.)
	switch(src.depth())
	{
	case CV_8U:  detail.depth = "CV_8U";  break;
	case CV_8S:  detail.depth = "CV_8S";  break;
	case CV_16U: detail.depth = "CV_16U"; break;
	case CV_16S: detail.depth = "CV_16S"; break;
	case CV_32S: detail.depth = "CV_32S"; break;
	case CV_32F: detail.depth = "CV_32F"; break;
	case CV_64F: detail.depth = "CV_64F"; break;
	default:     detail.depth = "unknown type"; break;
	}

	// Map the full type (depth + channel count) for the common cases.
	switch(src.type())
	{
	case CV_8UC1:  detail.type = "CV_8UC1";  break;
	case CV_8UC2:  detail.type = "CV_8UC2";  break;
	case CV_8UC3:  detail.type = "CV_8UC3";  break;
	case CV_32FC1: detail.type = "CV_32FC1"; break;
	case CV_32FC2: detail.type = "CV_32FC2"; break;
	case CV_32FC3: detail.type = "CV_32FC3"; break;
	default:       detail.type = "unknown type"; break;
	}

	detail.channel = src.channels();
	detail.empty = src.empty();

	src.release();

	if(timeChecking)
	{
		end = omp_get_wtime();
		Log(logINFO) << "getMatDetail :: " << "time elapsed " << end-start << std::endl;
	}

	if(memoryChecking)
	{
		double vm, rss;
		getMemoryUsage(vm, rss);
		Log(logINFO)<< "getMatDetail :: VM usage :" << vm << std::endl 
					<< "Resident set size :" << rss << std::endl;
	}

	return SOAP_OK;
}
Example #28
0
// Apply a 256-entry lookup table 'lut' (CV_8U; 1 channel or one per src
// channel) to every pixel of 'src' (CV_8UC1 or CV_8UC3) on the GPU via NPP,
// writing the result to 'dst'. Executes on stream 's'; synchronizes the
// device when 's' is the default (null) stream.
void cv::gpu::LUT(const GpuMat& src, const Mat& lut, GpuMat& dst, Stream& s)
{
    // Identity input levels 0..255, built once and shared by all calls.
    // NPP interpolates between (pLevels[i], pValues[i]) pairs; with identity
    // levels the operation degenerates to a plain table lookup.
    class LevelsInit
    {
    public:
        Npp32s pLevels[256];
        const Npp32s* pLevels3[3];
        int nValues3[3];

#if (CUDA_VERSION > 4020)
        GpuMat d_pLevels;   // newer NPP expects the levels in device memory
#endif

        LevelsInit()
        {
            nValues3[0] = nValues3[1] = nValues3[2] = 256;
            for (int i = 0; i < 256; ++i)
                pLevels[i] = i;


#if (CUDA_VERSION <= 4020)
            pLevels3[0] = pLevels3[1] = pLevels3[2] = pLevels;
#else
            d_pLevels.upload(Mat(1, 256, CV_32S, pLevels));
            pLevels3[0] = pLevels3[1] = pLevels3[2] = d_pLevels.ptr<Npp32s>();
#endif
        }
    };
    static LevelsInit lvls;

    int cn = src.channels();

    CV_Assert(src.type() == CV_8UC1 || src.type() == CV_8UC3);
    CV_Assert(lut.depth() == CV_8U && (lut.channels() == 1 || lut.channels() == cn) && lut.rows * lut.cols == 256 && lut.isContinuous());

    dst.create(src.size(), CV_MAKETYPE(lut.depth(), cn));

    NppiSize sz;
    sz.height = src.rows;
    sz.width = src.cols;

    // NPP wants the table values as 32-bit integers.
    Mat nppLut;
    lut.convertTo(nppLut, CV_32S);

    cudaStream_t stream = StreamAccessor::getStream(s);

    NppStreamHandler h(stream);

    if (src.type() == CV_8UC1)
    {
#if (CUDA_VERSION <= 4020)
        nppSafeCall( nppiLUT_Linear_8u_C1R(src.ptr<Npp8u>(), static_cast<int>(src.step),
            dst.ptr<Npp8u>(), static_cast<int>(dst.step), sz, nppLut.ptr<Npp32s>(), lvls.pLevels, 256) );
#else
        // Device copy lives until the end of this block, i.e. past the call.
        GpuMat d_nppLut(Mat(1, 256, CV_32S, nppLut.data));
        nppSafeCall( nppiLUT_Linear_8u_C1R(src.ptr<Npp8u>(), static_cast<int>(src.step),
            dst.ptr<Npp8u>(), static_cast<int>(dst.step), sz, d_nppLut.ptr<Npp32s>(), lvls.d_pLevels.ptr<Npp32s>(), 256) );
#endif
    }
    else
    {
        const Npp32s* pValues3[3];

        Mat nppLut3[3];
#if (CUDA_VERSION > 4020)
        // BUGFIX: these device copies must outlive the nppiLUT_Linear_8u_C3R
        // call below. They were previously declared inside the if/else
        // branches, so the GpuMat destructors freed the device memory before
        // NPP dereferenced the pointers stored in pValues3 (use-after-free).
        GpuMat d_nppLut;
        GpuMat d_nppLut0, d_nppLut1, d_nppLut2;
#endif
        if (nppLut.channels() == 1)
        {
            // One table shared by all three channels.
#if (CUDA_VERSION <= 4020)
            pValues3[0] = pValues3[1] = pValues3[2] = nppLut.ptr<Npp32s>();
#else
            d_nppLut.upload(Mat(1, 256, CV_32S, nppLut.data));
            pValues3[0] = pValues3[1] = pValues3[2] = d_nppLut.ptr<Npp32s>();
#endif
        }
        else
        {
            // Separate table per channel.
            cv::split(nppLut, nppLut3);

#if (CUDA_VERSION <= 4020)
            pValues3[0] = nppLut3[0].ptr<Npp32s>();
            pValues3[1] = nppLut3[1].ptr<Npp32s>();
            pValues3[2] = nppLut3[2].ptr<Npp32s>();
#else
            d_nppLut0.upload(Mat(1, 256, CV_32S, nppLut3[0].data));
            d_nppLut1.upload(Mat(1, 256, CV_32S, nppLut3[1].data));
            d_nppLut2.upload(Mat(1, 256, CV_32S, nppLut3[2].data));

            pValues3[0] = d_nppLut0.ptr<Npp32s>();
            pValues3[1] = d_nppLut1.ptr<Npp32s>();
            pValues3[2] = d_nppLut2.ptr<Npp32s>();
#endif
        }

        nppSafeCall( nppiLUT_Linear_8u_C3R(src.ptr<Npp8u>(), static_cast<int>(src.step),
            dst.ptr<Npp8u>(), static_cast<int>(dst.step), sz, pValues3, lvls.pLevels3, lvls.nValues3) );
    }

    if (stream == 0)
        cudaSafeCall( cudaDeviceSynchronize() );
}
Example #29
0
// Iteratively shift 'window' toward the centroid of the probability image
// until the movement is below the termination criteria. Returns the number
// of iterations performed; the converged window is written back to 'window'.
int cv::meanShift( InputArray _probImage, Rect& window, TermCriteria criteria )
{
    CV_INSTRUMENT_REGION()

    // Accept either a Mat or a UMat without forcing a copy.
    Size imgSize;
    int nch;
    Mat probMat;
    UMat probUMat;
    const bool useUMat = _probImage.isUMat();

    if (useUMat)
    {
        probUMat = _probImage.getUMat();
        nch = probUMat.channels();
        imgSize = probUMat.size();
    }
    else
    {
        probMat = _probImage.getMat();
        nch = probMat.channels();
        imgSize = probMat.size();
    }

    Rect searchWin = window;

    // The algorithm operates on a single-channel probability map.
    CV_Assert( nch == 1 );

    if( window.height <= 0 || window.width <= 0 )
        CV_Error( Error::StsBadArg, "Input window has non-positive sizes" );

    // Clip the caller's window to the image; its (clipped) size is reused
    // below when computing the centroid shift.
    window = window & Rect(0, 0, imgSize.width, imgSize.height);

    // Squared convergence threshold, compared against dx^2 + dy^2.
    double thresholdSq = (criteria.type & TermCriteria::EPS) ? std::max(criteria.epsilon, 0.) : 1.;
    thresholdSq = cvRound(thresholdSq*thresholdSq);
    const int maxIters = (criteria.type & TermCriteria::MAX_ITER) ? std::max(criteria.maxCount, 1) : 100;

    int iter;
    for( iter = 0; iter < maxIters; iter++ )
    {
        // Keep the search window inside the image and non-degenerate;
        // if it collapsed to nothing, restart from the image center.
        searchWin = searchWin & Rect(0, 0, imgSize.width, imgSize.height);
        if( searchWin == Rect() )
        {
            searchWin.x = imgSize.width/2;
            searchWin.y = imgSize.height/2;
        }
        searchWin.width = std::max(searchWin.width, 1);
        searchWin.height = std::max(searchWin.height, 1);

        Moments m;
        if (useUMat)
            m = moments(probUMat(searchWin));
        else
            m = moments(probMat(searchWin));

        // No mass under the window — nothing to track.
        if( fabs(m.m00) < DBL_EPSILON )
            break;

        // Shift toward the center of mass (offsets relative to the window size).
        int dx = cvRound( m.m10/m.m00 - window.width*0.5 );
        int dy = cvRound( m.m01/m.m00 - window.height*0.5 );

        // Clamp the moved window so it stays fully inside the image.
        int nx = std::min(std::max(searchWin.x + dx, 0), imgSize.width - searchWin.width);
        int ny = std::min(std::max(searchWin.y + dy, 0), imgSize.height - searchWin.height);

        dx = nx - searchWin.x;
        dy = ny - searchWin.y;
        searchWin.x = nx;
        searchWin.y = ny;

        // Converged once the (clamped) shift is small enough.
        if( dx*dx + dy*dy < thresholdSq )
            break;
    }

    window = searchWin;
    return iter;
}
Example #30
0
// Decode PBM/PGM/PPM pixel data into 'img', handling 1/8/24 bpp files in both
// ASCII ("plain") and binary form, 8- and 16-bit samples, endian swapping, and
// gray<->color expansion so the output matches img's channel count.
// Returns false if the stream is not positioned on valid data or reading fails.
// NOTE(review): m_offset/m_width/m_height/m_bpp/m_maxval/m_binary/m_type are
// presumably filled in by a prior header-parsing step — confirm against the
// class's readHeader.
bool  PxMDecoder::readData( Mat& img )
{
    int color = img.channels() > 1;
    uchar* data = img.ptr();
    int step = (int)img.step;
    PaletteEntry palette[256];
    bool   result = false;
    int  bit_depth = CV_ELEM_SIZE1(m_type)*8;             // sample size in bits (8 or 16)
    int  src_pitch = (m_width*m_bpp*bit_depth/8 + 7)/8;   // bytes per source row (rounded up for 1 bpp)
    int  nch = CV_MAT_CN(m_type);
    int  width3 = m_width*nch;                            // samples per row as stored in the file
    int  i, x, y;

    if( m_offset < 0 || !m_strm.isOpened())
        return false;

    AutoBuffer<uchar> _src(src_pitch + 32);
    uchar* src = _src;
    AutoBuffer<uchar> _gray_palette;
    uchar* gray_palette = _gray_palette;

    // create LUT for converting colors: maps file sample values 0..m_maxval
    // onto 0..255. For 1 bpp (PBM) the XOR inverts, since PBM uses 1 = black.
    if( bit_depth == 8 )
    {
        _gray_palette.allocate(m_maxval + 1);
        gray_palette = _gray_palette;

        for( i = 0; i <= m_maxval; i++ )
            gray_palette[i] = (uchar)((i*255/m_maxval)^(m_bpp == 1 ? 255 : 0));

        FillGrayPalette( palette, m_bpp==1 ? 1 : 8 , m_bpp == 1 );
    }

    try
    {
        // Seek to the start of the pixel data (just past the header).
        m_strm.setPos( m_offset );

        switch( m_bpp )
        {
        ////////////////////////// 1 BPP /////////////////////////
        case 1:
            if( !m_binary )
            {
                // ASCII PBM: one textual 0/1 per pixel.
                for( y = 0; y < m_height; y++, data += step )
                {
                    for( x = 0; x < m_width; x++ )
                        src[x] = ReadNumber( m_strm, 1 ) != 0;

                    if( color )
                        FillColorRow8( data, src, m_width, palette );
                    else
                        FillGrayRow8( data, src, m_width, gray_palette );
                }
            }
            else
            {
                // Binary PBM: 8 packed pixels per byte.
                for( y = 0; y < m_height; y++, data += step )
                {
                    m_strm.getBytes( src, src_pitch );

                    if( color )
                        FillColorRow1( data, src, m_width, palette );
                    else
                        FillGrayRow1( data, src, m_width, gray_palette );
                }
            }
            result = true;
            break;

        ////////////////////////// 8 BPP /////////////////////////
        case 8:
        case 24:
            for( y = 0; y < m_height; y++, data += step )
            {
                if( !m_binary )
                {
                    // ASCII PGM/PPM: one textual number per sample, clamped
                    // to m_maxval.
                    for( x = 0; x < width3; x++ )
                    {
                        int code = ReadNumber( m_strm, INT_MAX );
                        if( (unsigned)code > (unsigned)m_maxval ) code = m_maxval;
                        if( bit_depth == 8 )
                            src[x] = gray_palette[code];
                        else
                            ((ushort *)src)[x] = (ushort)code;
                    }
                }
                else
                {
                    m_strm.getBytes( src, src_pitch );
                    // 16-bit binary samples are big-endian in the file;
                    // swap bytes on little-endian hosts.
                    if( bit_depth == 16 && !isBigEndian() )
                    {
                        for( x = 0; x < width3; x++ )
                        {
                            uchar v = src[x * 2];
                            src[x * 2] = src[x * 2 + 1];
                            src[x * 2 + 1] = v;
                        }
                    }
                }

                // Destination is 8-bit but the file is 16-bit: keep the
                // high byte of each sample.
                if( img.depth() == CV_8U && bit_depth == 16 )
                {
                    for( x = 0; x < width3; x++ )
                    {
                        int v = ((ushort *)src)[x];
                        src[x] = (uchar)(v >> 8);
                    }
                }

                if( m_bpp == 8 ) // image has one channel
                {
                    if( color )
                    {
                        // Replicate the gray sample into all three BGR channels.
                        if( img.depth() == CV_8U ) {
                            uchar *d = data, *s = src, *end = src + m_width;
                            for( ; s < end; d += 3, s++)
                                d[0] = d[1] = d[2] = *s;
                        } else {
                            ushort *d = (ushort *)data, *s = (ushort *)src, *end = ((ushort *)src) + m_width;
                            for( ; s < end; s++, d += 3)
                                d[0] = d[1] = d[2] = *s;
                        }
                    }
                    else
                        memcpy( data, src, m_width*(bit_depth/8) );
                }
                else
                {
                    // 24 bpp (PPM): file stores RGB; convert to the requested
                    // BGR or grayscale output.
                    if( color )
                    {
                        if( img.depth() == CV_8U )
                            icvCvt_RGB2BGR_8u_C3R( src, 0, data, 0, cvSize(m_width,1) );
                        else
                            icvCvt_RGB2BGR_16u_C3R( (ushort *)src, 0, (ushort *)data, 0, cvSize(m_width,1) );
                    }
                    else if( img.depth() == CV_8U )
                        icvCvt_BGR2Gray_8u_C3C1R( src, 0, data, 0, cvSize(m_width,1), 2 );
                    else
                        icvCvt_BGRA2Gray_16u_CnC1R( (ushort *)src, 0, (ushort *)data, 0, cvSize(m_width,1), 3, 2 );
                }
            }
            result = true;
            break;
        default:
            assert(0);
        }
    }
    catch(...)
    {
        // Swallow stream errors; 'result' stays false for partial/failed reads.
    }

    return result;
}